repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_code_tokens | func_documentation_string | func_documentation_tokens | split_name | func_code_url |
---|---|---|---|---|---|---|---|---|---|---|
poppy-project/pypot | pypot/primitive/manager.py | PrimitiveManager.stop | def stop(self):
""" Stop the primitive manager. """
for p in self.primitives[:]:
p.stop()
StoppableLoopThread.stop(self) | python | def stop(self):
for p in self.primitives[:]:
p.stop()
StoppableLoopThread.stop(self) | [
"def",
"stop",
"(",
"self",
")",
":",
"for",
"p",
"in",
"self",
".",
"primitives",
"[",
":",
"]",
":",
"p",
".",
"stop",
"(",
")",
"StoppableLoopThread",
".",
"stop",
"(",
"self",
")"
]
| Stop the primitive manager. | [
"Stop",
"the",
"primitive",
"manager",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/primitive/manager.py#L76-L81 |
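The slice in `for p in self.primitives[:]` is the iterate-over-a-copy idiom, presumably because each `p.stop()` ends up removing the primitive from the manager's list. A minimal standalone sketch of the pattern (the classes are illustrative stand-ins, not pypot's):

```python
# Iterate-over-a-copy pattern: stopping mutates the list being traversed.
class Primitive:
    def __init__(self, manager):
        self.manager = manager

    def stop(self):
        # Stopping removes the primitive from its manager's list...
        self.manager.primitives.remove(self)

class Manager:
    def __init__(self):
        self.primitives = [Primitive(self) for _ in range(3)]

    def stop(self):
        # ...so iterate over a shallow copy; iterating self.primitives
        # directly would skip elements as the list shrinks underneath.
        for p in self.primitives[:]:
            p.stop()

m = Manager()
m.stop()
assert m.primitives == []
```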
poppy-project/pypot | pypot/vrep/io.py | VrepIO.load_scene | def load_scene(self, scene_path, start=False):
""" Loads a scene on the V-REP server.
:param str scene_path: path to a V-REP scene file
:param bool start: whether to directly start the simulation after loading the scene
.. note:: It is assumed that the scene file is always available on the server side.
"""
self.stop_simulation()
if not os.path.exists(scene_path):
raise IOError("No such file or directory: '{}'".format(scene_path))
self.call_remote_api('simxLoadScene', scene_path, True)
if start:
self.start_simulation() | python | def load_scene(self, scene_path, start=False):
self.stop_simulation()
if not os.path.exists(scene_path):
raise IOError("No such file or directory: '{}'".format(scene_path))
self.call_remote_api('simxLoadScene', scene_path, True)
if start:
self.start_simulation() | [
"def",
"load_scene",
"(",
"self",
",",
"scene_path",
",",
"start",
"=",
"False",
")",
":",
"self",
".",
"stop_simulation",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"scene_path",
")",
":",
"raise",
"IOError",
"(",
"\"No such file or directory: '{}'\"",
".",
"format",
"(",
"scene_path",
")",
")",
"self",
".",
"call_remote_api",
"(",
"'simxLoadScene'",
",",
"scene_path",
",",
"True",
")",
"if",
"start",
":",
"self",
".",
"start_simulation",
"(",
")"
]
| Loads a scene on the V-REP server.
:param str scene_path: path to a V-REP scene file
:param bool start: whether to directly start the simulation after loading the scene
.. note:: It is assumed that the scene file is always available on the server side. | [
"Loads",
"a",
"scene",
"on",
"the",
"V",
"-",
"REP",
"server",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L91-L108 |
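A hedged usage sketch for `load_scene`; the constructor arguments and the scene path below are assumptions for illustration, and, as the note says, the scene file must exist on the server side:

```python
# Hypothetical usage -- assumes a V-REP instance with its remote API
# listening locally; VrepIO's constructor arguments are assumed here.
from pypot.vrep.io import VrepIO

io = VrepIO(vrep_host='127.0.0.1', vrep_port=19997)
io.load_scene('/path/to/scene.ttt', start=True)  # illustrative path
```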
poppy-project/pypot | pypot/vrep/io.py | VrepIO.get_motor_position | def get_motor_position(self, motor_name):
""" Gets the motor current position. """
return self.call_remote_api('simxGetJointPosition',
self.get_object_handle(motor_name),
streaming=True) | python | def get_motor_position(self, motor_name):
return self.call_remote_api('simxGetJointPosition',
self.get_object_handle(motor_name),
streaming=True) | [
"def",
"get_motor_position",
"(",
"self",
",",
"motor_name",
")",
":",
"return",
"self",
".",
"call_remote_api",
"(",
"'simxGetJointPosition'",
",",
"self",
".",
"get_object_handle",
"(",
"motor_name",
")",
",",
"streaming",
"=",
"True",
")"
]
| Gets the motor current position. | [
"Gets",
"the",
"motor",
"current",
"position",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L143-L147 |
poppy-project/pypot | pypot/vrep/io.py | VrepIO.set_motor_position | def set_motor_position(self, motor_name, position):
""" Sets the motor target position. """
self.call_remote_api('simxSetJointTargetPosition',
self.get_object_handle(motor_name),
position,
sending=True) | python | def set_motor_position(self, motor_name, position):
self.call_remote_api('simxSetJointTargetPosition',
self.get_object_handle(motor_name),
position,
sending=True) | [
"def",
"set_motor_position",
"(",
"self",
",",
"motor_name",
",",
"position",
")",
":",
"self",
".",
"call_remote_api",
"(",
"'simxSetJointTargetPosition'",
",",
"self",
".",
"get_object_handle",
"(",
"motor_name",
")",
",",
"position",
",",
"sending",
"=",
"True",
")"
]
| Sets the motor target position. | [
"Sets",
"the",
"motor",
"target",
"position",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L149-L154 |
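Continuing the sketch above, the getter/setter pair combines into a simple read-modify-write (the joint name is an assumption and must match an object in the loaded scene):

```python
# Reuses the `io` instance from the load_scene sketch; 'head_z' is an
# assumed joint name, not guaranteed by any particular scene.
current = io.get_motor_position('head_z')
io.set_motor_position('head_z', current / 2.0)  # drive halfway to zero
```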
poppy-project/pypot | pypot/vrep/io.py | VrepIO.get_motor_force | def get_motor_force(self, motor_name):
""" Retrieves the force or torque applied to a joint along/about its active axis. """
return self.call_remote_api('simxGetJointForce',
self.get_object_handle(motor_name),
streaming=True) | python | def get_motor_force(self, motor_name):
return self.call_remote_api('simxGetJointForce',
self.get_object_handle(motor_name),
streaming=True) | [
"def",
"get_motor_force",
"(",
"self",
",",
"motor_name",
")",
":",
"return",
"self",
".",
"call_remote_api",
"(",
"'simxGetJointForce'",
",",
"self",
".",
"get_object_handle",
"(",
"motor_name",
")",
",",
"streaming",
"=",
"True",
")"
]
| Retrieves the force or torque applied to a joint along/about its active axis. | [
"Retrieves",
"the",
"force",
"or",
"torque",
"applied",
"to",
"a",
"joint",
"along",
"/",
"about",
"its",
"active",
"axis",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L156-L160 |
poppy-project/pypot | pypot/vrep/io.py | VrepIO.set_motor_force | def set_motor_force(self, motor_name, force):
""" Sets the maximum force or torque that a joint can exert. """
self.call_remote_api('simxSetJointForce',
self.get_object_handle(motor_name),
force,
sending=True) | python | def set_motor_force(self, motor_name, force):
self.call_remote_api('simxSetJointForce',
self.get_object_handle(motor_name),
force,
sending=True) | [
"def",
"set_motor_force",
"(",
"self",
",",
"motor_name",
",",
"force",
")",
":",
"self",
".",
"call_remote_api",
"(",
"'simxSetJointForce'",
",",
"self",
".",
"get_object_handle",
"(",
"motor_name",
")",
",",
"force",
",",
"sending",
"=",
"True",
")"
]
| Sets the maximum force or torque that a joint can exert. | [
"Sets",
"the",
"maximum",
"force",
"or",
"torque",
"that",
"a",
"joint",
"can",
"exert",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L162-L167 |
poppy-project/pypot | pypot/vrep/io.py | VrepIO.get_object_position | def get_object_position(self, object_name, relative_to_object=None):
""" Gets the object position. """
h = self.get_object_handle(object_name)
relative_handle = (-1 if relative_to_object is None
else self.get_object_handle(relative_to_object))
return self.call_remote_api('simxGetObjectPosition',
h, relative_handle,
streaming=True) | python | def get_object_position(self, object_name, relative_to_object=None):
h = self.get_object_handle(object_name)
relative_handle = (-1 if relative_to_object is None
else self.get_object_handle(relative_to_object))
return self.call_remote_api('simxGetObjectPosition',
h, relative_handle,
streaming=True) | [
"def",
"get_object_position",
"(",
"self",
",",
"object_name",
",",
"relative_to_object",
"=",
"None",
")",
":",
"h",
"=",
"self",
".",
"get_object_handle",
"(",
"object_name",
")",
"relative_handle",
"=",
"(",
"-",
"1",
"if",
"relative_to_object",
"is",
"None",
"else",
"self",
".",
"get_object_handle",
"(",
"relative_to_object",
")",
")",
"return",
"self",
".",
"call_remote_api",
"(",
"'simxGetObjectPosition'",
",",
"h",
",",
"relative_handle",
",",
"streaming",
"=",
"True",
")"
]
| Gets the object position. | [
"Gets",
"the",
"object",
"position",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L169-L177 |
poppy-project/pypot | pypot/vrep/io.py | VrepIO.set_object_position | def set_object_position(self, object_name, position=[0, 0, 0]):
""" Sets the object position. """
h = self.get_object_handle(object_name)
return self.call_remote_api('simxSetObjectPosition',
h, -1, position,
sending=True) | python | def set_object_position(self, object_name, position=[0, 0, 0]):
h = self.get_object_handle(object_name)
return self.call_remote_api('simxSetObjectPosition',
h, -1, position,
sending=True) | [
"def",
"set_object_position",
"(",
"self",
",",
"object_name",
",",
"position",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
")",
":",
"h",
"=",
"self",
".",
"get_object_handle",
"(",
"object_name",
")",
"return",
"self",
".",
"call_remote_api",
"(",
"'simxSetObjectPosition'",
",",
"h",
",",
"-",
"1",
",",
"position",
",",
"sending",
"=",
"True",
")"
]
| Sets the object position. | [
"Sets",
"the",
"object",
"position",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L179-L185 |
poppy-project/pypot | pypot/vrep/io.py | VrepIO.get_object_handle | def get_object_handle(self, obj):
""" Gets the vrep object handle. """
if obj not in self._object_handles:
self._object_handles[obj] = self._get_object_handle(obj=obj)
return self._object_handles[obj] | python | def get_object_handle(self, obj):
if obj not in self._object_handles:
self._object_handles[obj] = self._get_object_handle(obj=obj)
return self._object_handles[obj] | [
"def",
"get_object_handle",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
"not",
"in",
"self",
".",
"_object_handles",
":",
"self",
".",
"_object_handles",
"[",
"obj",
"]",
"=",
"self",
".",
"_get_object_handle",
"(",
"obj",
"=",
"obj",
")",
"return",
"self",
".",
"_object_handles",
"[",
"obj",
"]"
]
| Gets the vrep object handle. | [
"Gets",
"the",
"vrep",
"object",
"handle",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L200-L205 |
poppy-project/pypot | pypot/vrep/io.py | VrepIO.get_collision_state | def get_collision_state(self, collision_name):
""" Gets the collision state. """
return self.call_remote_api('simxReadCollision',
self.get_collision_handle(collision_name),
streaming=True) | python | def get_collision_state(self, collision_name):
return self.call_remote_api('simxReadCollision',
self.get_collision_handle(collision_name),
streaming=True) | [
"def",
"get_collision_state",
"(",
"self",
",",
"collision_name",
")",
":",
"return",
"self",
".",
"call_remote_api",
"(",
"'simxReadCollision'",
",",
"self",
".",
"get_collision_handle",
"(",
"collision_name",
")",
",",
"streaming",
"=",
"True",
")"
]
| Gets the collision state. | [
"Gets",
"the",
"collision",
"state",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L207-L211 |
poppy-project/pypot | pypot/vrep/io.py | VrepIO.get_collision_handle | def get_collision_handle(self, collision):
""" Gets a vrep collisions handle. """
if collision not in self._object_handles:
h = self._get_collision_handle(collision)
self._object_handles[collision] = h
return self._object_handles[collision] | python | def get_collision_handle(self, collision):
if collision not in self._object_handles:
h = self._get_collision_handle(collision)
self._object_handles[collision] = h
return self._object_handles[collision] | [
"def",
"get_collision_handle",
"(",
"self",
",",
"collision",
")",
":",
"if",
"collision",
"not",
"in",
"self",
".",
"_object_handles",
":",
"h",
"=",
"self",
".",
"_get_collision_handle",
"(",
"collision",
")",
"self",
".",
"_object_handles",
"[",
"collision",
"]",
"=",
"h",
"return",
"self",
".",
"_object_handles",
"[",
"collision",
"]"
]
| Gets a vrep collisions handle. | [
"Gets",
"a",
"vrep",
"collisions",
"handle",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L216-L222 |
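`get_object_handle` and `get_collision_handle` share a lazy-cache pattern: the remote lookup is paid once per name, then served from `self._object_handles`. The pattern in isolation:

```python
# Generic lazy cache, mirroring the two handle getters above.
_handles = {}

def cached_handle(name, resolve):
    # resolve() stands for any expensive lookup, e.g. a remote-API call.
    if name not in _handles:
        _handles[name] = resolve(name)  # one round-trip, then cached
    return _handles[name]

print(cached_handle('Cuboid', len))  # resolved via len() and cached
print(cached_handle('Cuboid', len))  # served straight from the dict
```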
poppy-project/pypot | pypot/vrep/io.py | VrepIO.add_cube | def add_cube(self, name, position, sizes, mass):
""" Add Cube """
self._create_pure_shape(0, 239, sizes, mass, [0, 0])
self.set_object_position("Cuboid", position)
self.change_object_name("Cuboid", name) | python | def add_cube(self, name, position, sizes, mass):
self._create_pure_shape(0, 239, sizes, mass, [0, 0])
self.set_object_position("Cuboid", position)
self.change_object_name("Cuboid", name) | [
"def",
"add_cube",
"(",
"self",
",",
"name",
",",
"position",
",",
"sizes",
",",
"mass",
")",
":",
"self",
".",
"_create_pure_shape",
"(",
"0",
",",
"239",
",",
"sizes",
",",
"mass",
",",
"[",
"0",
",",
"0",
"]",
")",
"self",
".",
"set_object_position",
"(",
"\"Cuboid\"",
",",
"position",
")",
"self",
".",
"change_object_name",
"(",
"\"Cuboid\"",
",",
"name",
")"
]
| Add Cube | [
"Add",
"Cube"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L231-L235 |
poppy-project/pypot | pypot/vrep/io.py | VrepIO.add_cylinder | def add_cylinder(self, name, position, sizes, mass, precision=[10, 10]):
""" Add Cylinder """
self._create_pure_shape(2, 239, sizes, mass, precision)
self.set_object_position("Cylinder", position)
self.change_object_name("Cylinder", name) | python | def add_cylinder(self, name, position, sizes, mass, precision=[10, 10]):
self._create_pure_shape(2, 239, sizes, mass, precision)
self.set_object_position("Cylinder", position)
self.change_object_name("Cylinder", name) | [
"def",
"add_cylinder",
"(",
"self",
",",
"name",
",",
"position",
",",
"sizes",
",",
"mass",
",",
"precision",
"=",
"[",
"10",
",",
"10",
"]",
")",
":",
"self",
".",
"_create_pure_shape",
"(",
"2",
",",
"239",
",",
"sizes",
",",
"mass",
",",
"precision",
")",
"self",
".",
"set_object_position",
"(",
"\"Cylinder\"",
",",
"position",
")",
"self",
".",
"change_object_name",
"(",
"\"Cylinder\"",
",",
"name",
")"
]
| Add Cylinder | [
"Add",
"Cylinder"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L243-L247 |
poppy-project/pypot | pypot/vrep/io.py | VrepIO.change_object_name | def change_object_name(self, old_name, new_name):
""" Change object name """
h = self._get_object_handle(old_name)
if old_name in self._object_handles:
self._object_handles.pop(old_name)
lua_code = "simSetObjectName({}, '{}')".format(h, new_name)
self._inject_lua_code(lua_code) | python | def change_object_name(self, old_name, new_name):
h = self._get_object_handle(old_name)
if old_name in self._object_handles:
self._object_handles.pop(old_name)
lua_code = "simSetObjectName({}, '{}')".format(h, new_name)
self._inject_lua_code(lua_code) | [
"def",
"change_object_name",
"(",
"self",
",",
"old_name",
",",
"new_name",
")",
":",
"h",
"=",
"self",
".",
"_get_object_handle",
"(",
"old_name",
")",
"if",
"old_name",
"in",
"self",
".",
"_object_handles",
":",
"self",
".",
"_object_handles",
".",
"pop",
"(",
"old_name",
")",
"lua_code",
"=",
"\"simSetObjectName({}, '{}')\"",
".",
"format",
"(",
"h",
",",
"new_name",
")",
"self",
".",
"_inject_lua_code",
"(",
"lua_code",
")"
]
| Change object name | [
"Change",
"object",
"name"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L255-L261 |
poppy-project/pypot | pypot/vrep/io.py | VrepIO._create_pure_shape | def _create_pure_shape(self, primitive_type, options, sizes, mass, precision):
""" Create Pure Shape """
lua_code = "simCreatePureShape({}, {}, {{{}, {}, {}}}, {}, {{{}, {}}})".format(
primitive_type, options, sizes[0], sizes[1], sizes[2], mass, precision[0], precision[1])
self._inject_lua_code(lua_code) | python | def _create_pure_shape(self, primitive_type, options, sizes, mass, precision):
lua_code = "simCreatePureShape({}, {}, {{{}, {}, {}}}, {}, {{{}, {}}})".format(
primitive_type, options, sizes[0], sizes[1], sizes[2], mass, precision[0], precision[1])
self._inject_lua_code(lua_code) | [
"def",
"_create_pure_shape",
"(",
"self",
",",
"primitive_type",
",",
"options",
",",
"sizes",
",",
"mass",
",",
"precision",
")",
":",
"lua_code",
"=",
"\"simCreatePureShape({}, {}, {{{}, {}, {}}}, {}, {{{}, {}}})\"",
".",
"format",
"(",
"primitive_type",
",",
"options",
",",
"sizes",
"[",
"0",
"]",
",",
"sizes",
"[",
"1",
"]",
",",
"sizes",
"[",
"2",
"]",
",",
"mass",
",",
"precision",
"[",
"0",
"]",
",",
"precision",
"[",
"1",
"]",
")",
"self",
".",
"_inject_lua_code",
"(",
"lua_code",
")"
]
| Create Pure Shape | [
"Create",
"Pure",
"Shape"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L263-L267 |
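The doubled braces in the format string above are how `str.format` emits literal `{`/`}` characters, which here become Lua table constructors. Verifiable in isolation:

```python
# '{{' and '}}' render as literal braces; '{}' are substitution slots.
sizes, mass, precision = [0.1, 0.2, 0.3], 0.5, [10, 10]
lua = "simCreatePureShape({}, {}, {{{}, {}, {}}}, {}, {{{}, {}}})".format(
    2, 239, sizes[0], sizes[1], sizes[2], mass, precision[0], precision[1])
print(lua)  # simCreatePureShape(2, 239, {0.1, 0.2, 0.3}, 0.5, {10, 10})
```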
poppy-project/pypot | pypot/vrep/io.py | VrepIO._inject_lua_code | def _inject_lua_code(self, lua_code):
""" Sends raw lua code and evaluate it wihtout any checking! """
msg = (ctypes.c_ubyte * len(lua_code)).from_buffer_copy(lua_code.encode())
self.call_remote_api('simxWriteStringStream', 'my_lua_code', msg) | python | def _inject_lua_code(self, lua_code):
msg = (ctypes.c_ubyte * len(lua_code)).from_buffer_copy(lua_code.encode())
self.call_remote_api('simxWriteStringStream', 'my_lua_code', msg) | [
"def",
"_inject_lua_code",
"(",
"self",
",",
"lua_code",
")",
":",
"msg",
"=",
"(",
"ctypes",
".",
"c_ubyte",
"*",
"len",
"(",
"lua_code",
")",
")",
".",
"from_buffer_copy",
"(",
"lua_code",
".",
"encode",
"(",
")",
")",
"self",
".",
"call_remote_api",
"(",
"'simxWriteStringStream'",
",",
"'my_lua_code'",
",",
"msg",
")"
]
| Sends raw lua code and evaluates it without any checking! | [
"Sends",
"raw",
"lua",
"code",
"and",
"evaluate",
"it",
"wihtout",
"any",
"checking!"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L269-L272 |
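The byte packing above can be checked in isolation. Note that `len(lua_code)` counts characters; for non-ASCII text the encoded byte count differs, so sizing the array with `len(code.encode())` would be the safer choice:

```python
# Copy a string's encoded bytes into a ctypes unsigned-byte array.
import ctypes

code = "simSetObjectName(42, 'cube')"
buf = (ctypes.c_ubyte * len(code)).from_buffer_copy(code.encode())
assert bytes(buf) == code.encode()  # same bytes, now in a ctypes array
```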
poppy-project/pypot | pypot/vrep/io.py | VrepIO.call_remote_api | def call_remote_api(self, func_name, *args, **kwargs):
""" Calls any remote API func in a thread_safe way.
:param str func_name: name of the remote API func to call
:param args: args to pass to the remote API call
:param kwargs: args to pass to the remote API call
.. note:: You can add an extra keyword to specify if you want to use the streaming or sending mode. The oneshot_wait mode is used by default (see `here <http://www.coppeliarobotics.com/helpFiles/en/remoteApiConstants.htm#operationModes>`_ for details about possible modes).
.. warning:: You should not pass the clientId and the operationMode as arguments. They will be automatically added.
As an example you can retrieve all joints name using the following call::
vrep_io.remote_api_call('simxGetObjectGroupData',
vrep_io.remote_api.sim_object_joint_type,
0,
streaming=True)
"""
f = getattr(remote_api, func_name)
mode = self._extract_mode(kwargs)
kwargs['operationMode'] = vrep_mode[mode]
# hard_retry = True
if '_force' in kwargs:
del kwargs['_force']
_force = True
else:
_force = False
for _ in range(VrepIO.MAX_ITER):
with self._lock:
ret = f(self.client_id, *args, **kwargs)
if _force:
return
if mode == 'sending' or isinstance(ret, int):
err, res = ret, None
else:
err, res = ret[0], ret[1:]
res = res[0] if len(res) == 1 else res
err = [bool((err >> i) & 1) for i in range(len(vrep_error))]
if remote_api.simx_return_novalue_flag not in err:
break
time.sleep(VrepIO.TIMEOUT)
# if any(err) and hard_retry:
# print "HARD RETRY"
# self.stop_simulation() #nope
#
# notconnected = True
# while notconnected:
# self.close()
# close_all_connections()
# time.sleep(0.5)
# try:
# self.open_io()
# notconnected = False
# except:
# print 'CONNECTION ERROR'
# pass
#
# self.start_simulation()
#
# with self._lock:
# ret = f(self.client_id, *args, **kwargs)
#
# if mode == 'sending' or isinstance(ret, int):
# err, res = ret, None
# else:
# err, res = ret[0], ret[1:]
# res = res[0] if len(res) == 1 else res
#
# err = [bool((err >> i) & 1) for i in range(len(vrep_error))]
#
# return res
if any(err):
msg = ' '.join([vrep_error[2 ** i]
for i, e in enumerate(err) if e])
raise VrepIOErrors(msg)
return res | python | def call_remote_api(self, func_name, *args, **kwargs):
f = getattr(remote_api, func_name)
mode = self._extract_mode(kwargs)
kwargs['operationMode'] = vrep_mode[mode]
if '_force' in kwargs:
del kwargs['_force']
_force = True
else:
_force = False
for _ in range(VrepIO.MAX_ITER):
with self._lock:
ret = f(self.client_id, *args, **kwargs)
if _force:
return
if mode == 'sending' or isinstance(ret, int):
err, res = ret, None
else:
err, res = ret[0], ret[1:]
res = res[0] if len(res) == 1 else res
err = [bool((err >> i) & 1) for i in range(len(vrep_error))]
if remote_api.simx_return_novalue_flag not in err:
break
time.sleep(VrepIO.TIMEOUT)
if any(err):
msg = ' '.join([vrep_error[2 ** i]
for i, e in enumerate(err) if e])
raise VrepIOErrors(msg)
return res | [
"def",
"call_remote_api",
"(",
"self",
",",
"func_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"f",
"=",
"getattr",
"(",
"remote_api",
",",
"func_name",
")",
"mode",
"=",
"self",
".",
"_extract_mode",
"(",
"kwargs",
")",
"kwargs",
"[",
"'operationMode'",
"]",
"=",
"vrep_mode",
"[",
"mode",
"]",
"# hard_retry = True",
"if",
"'_force'",
"in",
"kwargs",
":",
"del",
"kwargs",
"[",
"'_force'",
"]",
"_force",
"=",
"True",
"else",
":",
"_force",
"=",
"False",
"for",
"_",
"in",
"range",
"(",
"VrepIO",
".",
"MAX_ITER",
")",
":",
"with",
"self",
".",
"_lock",
":",
"ret",
"=",
"f",
"(",
"self",
".",
"client_id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"_force",
":",
"return",
"if",
"mode",
"==",
"'sending'",
"or",
"isinstance",
"(",
"ret",
",",
"int",
")",
":",
"err",
",",
"res",
"=",
"ret",
",",
"None",
"else",
":",
"err",
",",
"res",
"=",
"ret",
"[",
"0",
"]",
",",
"ret",
"[",
"1",
":",
"]",
"res",
"=",
"res",
"[",
"0",
"]",
"if",
"len",
"(",
"res",
")",
"==",
"1",
"else",
"res",
"err",
"=",
"[",
"bool",
"(",
"(",
"err",
">>",
"i",
")",
"&",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"vrep_error",
")",
")",
"]",
"if",
"remote_api",
".",
"simx_return_novalue_flag",
"not",
"in",
"err",
":",
"break",
"time",
".",
"sleep",
"(",
"VrepIO",
".",
"TIMEOUT",
")",
"# if any(err) and hard_retry:",
"# print \"HARD RETRY\"",
"# self.stop_simulation() #nope",
"#",
"# notconnected = True",
"# while notconnected:",
"# self.close()",
"# close_all_connections()",
"# time.sleep(0.5)",
"# try:",
"# self.open_io()",
"# notconnected = False",
"# except:",
"# print 'CONNECTION ERROR'",
"# pass",
"#",
"# self.start_simulation()",
"#",
"# with self._lock:",
"# ret = f(self.client_id, *args, **kwargs)",
"#",
"# if mode == 'sending' or isinstance(ret, int):",
"# err, res = ret, None",
"# else:",
"# err, res = ret[0], ret[1:]",
"# res = res[0] if len(res) == 1 else res",
"#",
"# err = [bool((err >> i) & 1) for i in range(len(vrep_error))]",
"#",
"# return res",
"if",
"any",
"(",
"err",
")",
":",
"msg",
"=",
"' '",
".",
"join",
"(",
"[",
"vrep_error",
"[",
"2",
"**",
"i",
"]",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"err",
")",
"if",
"e",
"]",
")",
"raise",
"VrepIOErrors",
"(",
"msg",
")",
"return",
"res"
]
| Calls any remote API func in a thread_safe way.
:param str func_name: name of the remote API func to call
:param args: args to pass to the remote API call
:param kwargs: args to pass to the remote API call
.. note:: You can add an extra keyword to specify if you want to use the streaming or sending mode. The oneshot_wait mode is used by default (see `here <http://www.coppeliarobotics.com/helpFiles/en/remoteApiConstants.htm#operationModes>`_ for details about possible modes).
.. warning:: You should not pass the clientId and the operationMode as arguments. They will be automatically added.
As an example you can retrieve all joints name using the following call::
vrep_io.remote_api_call('simxGetObjectGroupData',
vrep_io.remote_api.sim_object_joint_type,
0,
streaming=True) | [
"Calls",
"any",
"remote",
"API",
"func",
"in",
"a",
"thread_safe",
"way",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L274-L361 |
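The error decoding inside `call_remote_api` treats the return code as a bit field and unpacks it into one boolean per flag position. The comprehension in isolation:

```python
# Unpack a bit-field status into per-flag booleans.
NB_FLAGS = 8                 # stand-in for len(vrep_error)
err = 0b00000101             # flags 0 and 2 set, for illustration
flags = [bool((err >> i) & 1) for i in range(NB_FLAGS)]
assert flags[0] and flags[2] and not flags[1]
```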
poppy-project/pypot | pypot/server/httpserver.py | HTTPRobotServer.run | def run(self, **kwargs):
""" Start the tornado server, run forever"""
try:
loop = IOLoop()
app = self.make_app()
app.listen(self.port)
loop.start()
except socket.error as serr:
# Re-raise the socket error if it is not "[Errno 98] Address already in use"
if serr.errno != errno.EADDRINUSE:
raise serr
else:
logger.warning('The webserver port {} is already used. Maybe the HttpRobotServer is already running, or another program is using this port.'.format(self.port)) | python | def run(self, **kwargs):
try:
loop = IOLoop()
app = self.make_app()
app.listen(self.port)
loop.start()
except socket.error as serr:
if serr.errno != errno.EADDRINUSE:
raise serr
else:
logger.warning('The webserver port {} is already used. Maybe the HttpRobotServer is already running, or another program is using this port.'.format(self.port)) | [
"def",
"run",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"loop",
"=",
"IOLoop",
"(",
")",
"app",
"=",
"self",
".",
"make_app",
"(",
")",
"app",
".",
"listen",
"(",
"self",
".",
"port",
")",
"loop",
".",
"start",
"(",
")",
"except",
"socket",
".",
"error",
"as",
"serr",
":",
"# Re raise the socket error if not \"[Errno 98] Address already in use\"",
"if",
"serr",
".",
"errno",
"!=",
"errno",
".",
"EADDRINUSE",
":",
"raise",
"serr",
"else",
":",
"logger",
".",
"warning",
"(",
"'The webserver port {} is already used. May be the HttpRobotServer is already running or another software is using this port.'",
".",
"format",
"(",
"self",
".",
"port",
")",
")"
]
| Start the tornado server, run forever | [
"Start",
"the",
"tornado",
"server",
"run",
"forever"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/server/httpserver.py#L253-L267 |
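The errno test above distinguishes "port already bound" from every other socket failure. A minimal reproduction of that guard (the port number is illustrative):

```python
# Swallow only EADDRINUSE; re-raise anything else.
import errno
import socket

def try_listen(port):
    s = socket.socket()
    try:
        s.bind(('', port))
    except socket.error as serr:
        if serr.errno != errno.EADDRINUSE:
            raise
        print('port {} already in use'.format(port))
    finally:
        s.close()

try_listen(8080)  # illustrative port
```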
poppy-project/pypot | pypot/server/zmqserver.py | ZMQRobotServer.run | def run(self):
""" Run an infinite REQ/REP loop. """
while True:
req = self.socket.recv_json()
try:
answer = self.handle_request(req)
self.socket.send(json.dumps(answer))
except (AttributeError, TypeError) as e:
self.socket.send_json({'error': str(e)}) | python | def run(self):
while True:
req = self.socket.recv_json()
try:
answer = self.handle_request(req)
self.socket.send(json.dumps(answer))
except (AttributeError, TypeError) as e:
self.socket.send_json({'error': str(e)}) | [
"def",
"run",
"(",
"self",
")",
":",
"while",
"True",
":",
"req",
"=",
"self",
".",
"socket",
".",
"recv_json",
"(",
")",
"try",
":",
"answer",
"=",
"self",
".",
"handle_request",
"(",
"req",
")",
"self",
".",
"socket",
".",
"send",
"(",
"json",
".",
"dumps",
"(",
"answer",
")",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
")",
"as",
"e",
":",
"self",
".",
"socket",
".",
"send_json",
"(",
"{",
"'error'",
":",
"str",
"(",
"e",
")",
"}",
")"
]
| Run an infinite REQ/REP loop. | [
"Run",
"an",
"infinite",
"REQ",
"/",
"REP",
"loop",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/server/zmqserver.py#L26-L36 |
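A hedged client-side counterpart for this REQ/REP loop; the endpoint and the request schema below are assumptions for illustration, not taken from pypot's documentation:

```python
# Hypothetical REQ client; endpoint and message layout are assumed.
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect('tcp://127.0.0.1:33000')   # illustrative address/port
sock.send_json({'get': 'motors'})       # request schema is a placeholder
print(sock.recv_json())                 # e.g. {'error': ...} on failure
```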
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.open | def open(self, port, baudrate=1000000, timeout=0.05):
""" Opens a new serial communication (closes the previous communication if needed).
:raises: :py:exc:`~pypot.dynamixel.io.DxlError` if the port is already used.
"""
self._open(port, baudrate, timeout)
logger.info("Opening port '%s'", self.port,
extra={'port': port,
'baudrate': baudrate,
'timeout': timeout}) | python | def open(self, port, baudrate=1000000, timeout=0.05):
self._open(port, baudrate, timeout)
logger.info("Opening port '%s'", self.port,
extra={'port': port,
'baudrate': baudrate,
'timeout': timeout}) | [
"def",
"open",
"(",
"self",
",",
"port",
",",
"baudrate",
"=",
"1000000",
",",
"timeout",
"=",
"0.05",
")",
":",
"self",
".",
"_open",
"(",
"port",
",",
"baudrate",
",",
"timeout",
")",
"logger",
".",
"info",
"(",
"\"Opening port '%s'\"",
",",
"self",
".",
"port",
",",
"extra",
"=",
"{",
"'port'",
":",
"port",
",",
"'baudrate'",
":",
"baudrate",
",",
"'timeout'",
":",
"timeout",
"}",
")"
]
| Opens a new serial communication (closes the previous communication if needed).
:raises: :py:exc:`~pypot.dynamixel.io.DxlError` if the port is already used. | [
"Opens",
"a",
"new",
"serial",
"communication",
"(",
"closes",
"the",
"previous",
"communication",
"if",
"needed",
")",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L92-L102 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.close | def close(self, _force_lock=False):
""" Closes the serial communication if opened. """
if not self.closed:
with self.__force_lock(_force_lock) or self._serial_lock:
self._serial.close()
self.__used_ports.remove(self.port)
logger.info("Closing port '%s'", self.port,
extra={'port': self.port,
'baudrate': self.baudrate,
'timeout': self.timeout}) | python | def close(self, _force_lock=False):
if not self.closed:
with self.__force_lock(_force_lock) or self._serial_lock:
self._serial.close()
self.__used_ports.remove(self.port)
logger.info("Closing port '%s'", self.port,
extra={'port': self.port,
'baudrate': self.baudrate,
'timeout': self.timeout}) | [
"def",
"close",
"(",
"self",
",",
"_force_lock",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"closed",
":",
"with",
"self",
".",
"__force_lock",
"(",
"_force_lock",
")",
"or",
"self",
".",
"_serial_lock",
":",
"self",
".",
"_serial",
".",
"close",
"(",
")",
"self",
".",
"__used_ports",
".",
"remove",
"(",
"self",
".",
"port",
")",
"logger",
".",
"info",
"(",
"\"Closing port '%s'\"",
",",
"self",
".",
"port",
",",
"extra",
"=",
"{",
"'port'",
":",
"self",
".",
"port",
",",
"'baudrate'",
":",
"self",
".",
"baudrate",
",",
"'timeout'",
":",
"self",
".",
"timeout",
"}",
")"
]
| Closes the serial communication if opened. | [
"Closes",
"the",
"serial",
"communication",
"if",
"opened",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L145-L155 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.flush | def flush(self, _force_lock=False):
""" Flushes the serial communication (both input and output). """
if self.closed:
raise DxlError('attempt to flush a closed serial communication')
with self.__force_lock(_force_lock) or self._serial_lock:
self._serial.flushInput()
self._serial.flushOutput() | python | def flush(self, _force_lock=False):
if self.closed:
raise DxlError('attempt to flush a closed serial communication')
with self.__force_lock(_force_lock) or self._serial_lock:
self._serial.flushInput()
self._serial.flushOutput() | [
"def",
"flush",
"(",
"self",
",",
"_force_lock",
"=",
"False",
")",
":",
"if",
"self",
".",
"closed",
":",
"raise",
"DxlError",
"(",
"'attempt to flush a closed serial communication'",
")",
"with",
"self",
".",
"__force_lock",
"(",
"_force_lock",
")",
"or",
"self",
".",
"_serial_lock",
":",
"self",
".",
"_serial",
".",
"flushInput",
"(",
")",
"self",
".",
"_serial",
".",
"flushOutput",
"(",
")"
]
| Flushes the serial communication (both input and output). | [
"Flushes",
"the",
"serial",
"communication",
"(",
"both",
"input",
"and",
"output",
")",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L157-L164 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.ping | def ping(self, id):
""" Pings the motor with the specified id.
.. note:: The motor id should always be included in [0, 253]. 254 is used for broadcast.
"""
pp = self._protocol.DxlPingPacket(id)
try:
self._send_packet(pp, error_handler=None)
return True
except DxlTimeoutError:
return False | python | def ping(self, id):
pp = self._protocol.DxlPingPacket(id)
try:
self._send_packet(pp, error_handler=None)
return True
except DxlTimeoutError:
return False | [
"def",
"ping",
"(",
"self",
",",
"id",
")",
":",
"pp",
"=",
"self",
".",
"_protocol",
".",
"DxlPingPacket",
"(",
"id",
")",
"try",
":",
"self",
".",
"_send_packet",
"(",
"pp",
",",
"error_handler",
"=",
"None",
")",
"return",
"True",
"except",
"DxlTimeoutError",
":",
"return",
"False"
]
| Pings the motor with the specified id.
.. note:: The motor id should always be included in [0, 253]. 254 is used for broadcast. | [
"Pings",
"the",
"motor",
"with",
"the",
"specified",
"id",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L205-L217 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.scan | def scan(self, ids=range(254)):
""" Pings all ids within the specified list, by default it finds all the motors connected to the bus. """
return [id for id in ids if self.ping(id)] | python | def scan(self, ids=range(254)):
return [id for id in ids if self.ping(id)] | [
"def",
"scan",
"(",
"self",
",",
"ids",
"=",
"range",
"(",
"254",
")",
")",
":",
"return",
"[",
"id",
"for",
"id",
"in",
"ids",
"if",
"self",
".",
"ping",
"(",
"id",
")",
"]"
]
| Pings all ids within the specified list, by default it finds all the motors connected to the bus. | [
"Pings",
"all",
"ids",
"within",
"the",
"specified",
"list",
"by",
"default",
"it",
"finds",
"all",
"the",
"motors",
"connected",
"to",
"the",
"bus",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L219-L221 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.get_model | def get_model(self, ids):
""" Gets the model for the specified motors. """
to_get_ids = [i for i in ids if i not in self._known_models]
models = [dxl_to_model(m) for m in self._get_model(to_get_ids, convert=False)]
self._known_models.update(zip(to_get_ids, models))
return tuple(self._known_models[id] for id in ids) | python | def get_model(self, ids):
to_get_ids = [i for i in ids if i not in self._known_models]
models = [dxl_to_model(m) for m in self._get_model(to_get_ids, convert=False)]
self._known_models.update(zip(to_get_ids, models))
return tuple(self._known_models[id] for id in ids) | [
"def",
"get_model",
"(",
"self",
",",
"ids",
")",
":",
"to_get_ids",
"=",
"[",
"i",
"for",
"i",
"in",
"ids",
"if",
"i",
"not",
"in",
"self",
".",
"_known_models",
"]",
"models",
"=",
"[",
"dxl_to_model",
"(",
"m",
")",
"for",
"m",
"in",
"self",
".",
"_get_model",
"(",
"to_get_ids",
",",
"convert",
"=",
"False",
")",
"]",
"self",
".",
"_known_models",
".",
"update",
"(",
"zip",
"(",
"to_get_ids",
",",
"models",
")",
")",
"return",
"tuple",
"(",
"self",
".",
"_known_models",
"[",
"id",
"]",
"for",
"id",
"in",
"ids",
")"
]
| Gets the model for the specified motors. | [
"Gets",
"the",
"model",
"for",
"the",
"specified",
"motors",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L225-L231 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.change_id | def change_id(self, new_id_for_id):
""" Changes the id of the specified motors (each id must be unique on the bus). """
if len(set(new_id_for_id.values())) < len(new_id_for_id):
raise ValueError('each id must be unique.')
for new_id in new_id_for_id.itervalues():
if self.ping(new_id):
raise ValueError('id {} is already used.'.format(new_id))
self._change_id(new_id_for_id)
for motor_id, new_id in new_id_for_id.iteritems():
if motor_id in self._known_models:
self._known_models[new_id] = self._known_models[motor_id]
del self._known_models[motor_id]
if motor_id in self._known_mode:
self._known_mode[new_id] = self._known_mode[motor_id]
del self._known_mode[motor_id] | python | def change_id(self, new_id_for_id):
if len(set(new_id_for_id.values())) < len(new_id_for_id):
raise ValueError('each id must be unique.')
for new_id in new_id_for_id.itervalues():
if self.ping(new_id):
raise ValueError('id {} is already used.'.format(new_id))
self._change_id(new_id_for_id)
for motor_id, new_id in new_id_for_id.iteritems():
if motor_id in self._known_models:
self._known_models[new_id] = self._known_models[motor_id]
del self._known_models[motor_id]
if motor_id in self._known_mode:
self._known_mode[new_id] = self._known_mode[motor_id]
del self._known_mode[motor_id] | [
"def",
"change_id",
"(",
"self",
",",
"new_id_for_id",
")",
":",
"if",
"len",
"(",
"set",
"(",
"new_id_for_id",
".",
"values",
"(",
")",
")",
")",
"<",
"len",
"(",
"new_id_for_id",
")",
":",
"raise",
"ValueError",
"(",
"'each id must be unique.'",
")",
"for",
"new_id",
"in",
"new_id_for_id",
".",
"itervalues",
"(",
")",
":",
"if",
"self",
".",
"ping",
"(",
"new_id",
")",
":",
"raise",
"ValueError",
"(",
"'id {} is already used.'",
".",
"format",
"(",
"new_id",
")",
")",
"self",
".",
"_change_id",
"(",
"new_id_for_id",
")",
"for",
"motor_id",
",",
"new_id",
"in",
"new_id_for_id",
".",
"iteritems",
"(",
")",
":",
"if",
"motor_id",
"in",
"self",
".",
"_known_models",
":",
"self",
".",
"_known_models",
"[",
"new_id",
"]",
"=",
"self",
".",
"_known_models",
"[",
"motor_id",
"]",
"del",
"self",
".",
"_known_models",
"[",
"motor_id",
"]",
"if",
"motor_id",
"in",
"self",
".",
"_known_mode",
":",
"self",
".",
"_known_mode",
"[",
"new_id",
"]",
"=",
"self",
".",
"_known_mode",
"[",
"motor_id",
"]",
"del",
"self",
".",
"_known_mode",
"[",
"motor_id",
"]"
]
| Changes the id of the specified motors (each id must be unique on the bus). | [
"Changes",
"the",
"id",
"of",
"the",
"specified",
"motors",
"(",
"each",
"id",
"must",
"be",
"unique",
"on",
"the",
"bus",
")",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L233-L250 |
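`itervalues()` and `iteritems()` exist only on Python 2 dictionaries, so this method is Python 2 specific as written. The same traversals under Python 3 (the id mapping is illustrative):

```python
# Python 3 rendering of the two dict traversals above.
new_id_for_id = {10: 20, 11: 21}                 # illustrative id mapping
for new_id in new_id_for_id.values():            # was: itervalues()
    print('would ping', new_id)
for motor_id, new_id in new_id_for_id.items():   # was: iteritems()
    print('would move cached entries', motor_id, '->', new_id)
```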
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.change_baudrate | def change_baudrate(self, baudrate_for_ids):
""" Changes the baudrate of the specified motors. """
self._change_baudrate(baudrate_for_ids)
for motor_id in baudrate_for_ids:
if motor_id in self._known_models:
del self._known_models[motor_id]
if motor_id in self._known_mode:
del self._known_mode[motor_id] | python | def change_baudrate(self, baudrate_for_ids):
self._change_baudrate(baudrate_for_ids)
for motor_id in baudrate_for_ids:
if motor_id in self._known_models:
del self._known_models[motor_id]
if motor_id in self._known_mode:
del self._known_mode[motor_id] | [
"def",
"change_baudrate",
"(",
"self",
",",
"baudrate_for_ids",
")",
":",
"self",
".",
"_change_baudrate",
"(",
"baudrate_for_ids",
")",
"for",
"motor_id",
"in",
"baudrate_for_ids",
":",
"if",
"motor_id",
"in",
"self",
".",
"_known_models",
":",
"del",
"self",
".",
"_known_models",
"[",
"motor_id",
"]",
"if",
"motor_id",
"in",
"self",
".",
"_known_mode",
":",
"del",
"self",
".",
"_known_mode",
"[",
"motor_id",
"]"
]
| Changes the baudrate of the specified motors. | [
"Changes",
"the",
"baudrate",
"of",
"the",
"specified",
"motors",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L252-L260 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.get_status_return_level | def get_status_return_level(self, ids, **kwargs):
""" Gets the status level for the specified motors. """
convert = kwargs['convert'] if 'convert' in kwargs else self._convert
srl = []
for id in ids:
try:
srl.extend(self._get_status_return_level((id, ),
error_handler=None, convert=convert))
except DxlTimeoutError as e:
if self.ping(id):
srl.append('never' if convert else 0)
else:
if self._error_handler:
self._error_handler.handle_timeout(e)
return ()
else:
raise e
return tuple(srl) | python | def get_status_return_level(self, ids, **kwargs):
convert = kwargs['convert'] if 'convert' in kwargs else self._convert
srl = []
for id in ids:
try:
srl.extend(self._get_status_return_level((id, ),
error_handler=None, convert=convert))
except DxlTimeoutError as e:
if self.ping(id):
srl.append('never' if convert else 0)
else:
if self._error_handler:
self._error_handler.handle_timeout(e)
return ()
else:
raise e
return tuple(srl) | [
"def",
"get_status_return_level",
"(",
"self",
",",
"ids",
",",
"*",
"*",
"kwargs",
")",
":",
"convert",
"=",
"kwargs",
"[",
"'convert'",
"]",
"if",
"'convert'",
"in",
"kwargs",
"else",
"self",
".",
"_convert",
"srl",
"=",
"[",
"]",
"for",
"id",
"in",
"ids",
":",
"try",
":",
"srl",
".",
"extend",
"(",
"self",
".",
"_get_status_return_level",
"(",
"(",
"id",
",",
")",
",",
"error_handler",
"=",
"None",
",",
"convert",
"=",
"convert",
")",
")",
"except",
"DxlTimeoutError",
"as",
"e",
":",
"if",
"self",
".",
"ping",
"(",
"id",
")",
":",
"srl",
".",
"append",
"(",
"'never'",
"if",
"convert",
"else",
"0",
")",
"else",
":",
"if",
"self",
".",
"_error_handler",
":",
"self",
".",
"_error_handler",
".",
"handle_timeout",
"(",
"e",
")",
"return",
"(",
")",
"else",
":",
"raise",
"e",
"return",
"tuple",
"(",
"srl",
")"
]
| Gets the status level for the specified motors. | [
"Gets",
"the",
"status",
"level",
"for",
"the",
"specified",
"motors",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L262-L280 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.set_status_return_level | def set_status_return_level(self, srl_for_id, **kwargs):
""" Sets status return level to the specified motors. """
convert = kwargs['convert'] if 'convert' in kwargs else self._convert
if convert:
srl_for_id = dict(zip(srl_for_id.keys(),
[('never', 'read', 'always').index(s) for s in srl_for_id.values()]))
self._set_status_return_level(srl_for_id, convert=False) | python | def set_status_return_level(self, srl_for_id, **kwargs):
convert = kwargs['convert'] if 'convert' in kwargs else self._convert
if convert:
srl_for_id = dict(zip(srl_for_id.keys(),
[('never', 'read', 'always').index(s) for s in srl_for_id.values()]))
self._set_status_return_level(srl_for_id, convert=False) | [
"def",
"set_status_return_level",
"(",
"self",
",",
"srl_for_id",
",",
"*",
"*",
"kwargs",
")",
":",
"convert",
"=",
"kwargs",
"[",
"'convert'",
"]",
"if",
"'convert'",
"in",
"kwargs",
"else",
"self",
".",
"_convert",
"if",
"convert",
":",
"srl_for_id",
"=",
"dict",
"(",
"zip",
"(",
"srl_for_id",
".",
"keys",
"(",
")",
",",
"[",
"(",
"'never'",
",",
"'read'",
",",
"'always'",
")",
".",
"index",
"(",
"s",
")",
"for",
"s",
"in",
"srl_for_id",
".",
"values",
"(",
")",
"]",
")",
")",
"self",
".",
"_set_status_return_level",
"(",
"srl_for_id",
",",
"convert",
"=",
"False",
")"
]
| Sets status return level to the specified motors. | [
"Sets",
"status",
"return",
"level",
"to",
"the",
"specified",
"motors",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L282-L288 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.switch_led_on | def switch_led_on(self, ids):
""" Switches on the LED of the motors with the specified ids. """
self._set_LED(dict(zip(ids, itertools.repeat(True)))) | python | def switch_led_on(self, ids):
self._set_LED(dict(zip(ids, itertools.repeat(True)))) | [
"def",
"switch_led_on",
"(",
"self",
",",
"ids",
")",
":",
"self",
".",
"_set_LED",
"(",
"dict",
"(",
"zip",
"(",
"ids",
",",
"itertools",
".",
"repeat",
"(",
"True",
")",
")",
")",
")"
]
| Switches on the LED of the motors with the specified ids. | [
"Switches",
"on",
"the",
"LED",
"of",
"the",
"motors",
"with",
"the",
"specified",
"ids",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L290-L292 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.switch_led_off | def switch_led_off(self, ids):
""" Switches off the LED of the motors with the specified ids. """
self._set_LED(dict(zip(ids, itertools.repeat(False)))) | python | def switch_led_off(self, ids):
self._set_LED(dict(zip(ids, itertools.repeat(False)))) | [
"def",
"switch_led_off",
"(",
"self",
",",
"ids",
")",
":",
"self",
".",
"_set_LED",
"(",
"dict",
"(",
"zip",
"(",
"ids",
",",
"itertools",
".",
"repeat",
"(",
"False",
")",
")",
")",
")"
]
| Switches off the LED of the motors with the specified ids. | [
"Switches",
"off",
"the",
"LED",
"of",
"the",
"motors",
"with",
"the",
"specified",
"ids",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L294-L296 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.enable_torque | def enable_torque(self, ids):
""" Enables torque of the motors with the specified ids. """
self._set_torque_enable(dict(zip(ids, itertools.repeat(True)))) | python | def enable_torque(self, ids):
self._set_torque_enable(dict(zip(ids, itertools.repeat(True)))) | [
"def",
"enable_torque",
"(",
"self",
",",
"ids",
")",
":",
"self",
".",
"_set_torque_enable",
"(",
"dict",
"(",
"zip",
"(",
"ids",
",",
"itertools",
".",
"repeat",
"(",
"True",
")",
")",
")",
")"
]
| Enables torque of the motors with the specified ids. | [
"Enables",
"torque",
"of",
"the",
"motors",
"with",
"the",
"specified",
"ids",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L298-L300 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.disable_torque | def disable_torque(self, ids):
""" Disables torque of the motors with the specified ids. """
self._set_torque_enable(dict(zip(ids, itertools.repeat(False)))) | python | def disable_torque(self, ids):
self._set_torque_enable(dict(zip(ids, itertools.repeat(False)))) | [
"def",
"disable_torque",
"(",
"self",
",",
"ids",
")",
":",
"self",
".",
"_set_torque_enable",
"(",
"dict",
"(",
"zip",
"(",
"ids",
",",
"itertools",
".",
"repeat",
"(",
"False",
")",
")",
")",
")"
]
| Disables torque of the motors with the specified ids. | [
"Disables",
"torque",
"of",
"the",
"motors",
"with",
"the",
"specified",
"ids",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L302-L304 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.get_pid_gain | def get_pid_gain(self, ids, **kwargs):
""" Gets the pid gain for the specified motors. """
return tuple([tuple(reversed(t)) for t in self._get_pid_gain(ids, **kwargs)]) | python | def get_pid_gain(self, ids, **kwargs):
return tuple([tuple(reversed(t)) for t in self._get_pid_gain(ids, **kwargs)]) | [
"def",
"get_pid_gain",
"(",
"self",
",",
"ids",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"tuple",
"(",
"[",
"tuple",
"(",
"reversed",
"(",
"t",
")",
")",
"for",
"t",
"in",
"self",
".",
"_get_pid_gain",
"(",
"ids",
",",
"*",
"*",
"kwargs",
")",
"]",
")"
]
| Gets the pid gain for the specified motors. | [
"Gets",
"the",
"pid",
"gain",
"for",
"the",
"specified",
"motors",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L306-L308 |
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.set_pid_gain | def set_pid_gain(self, pid_for_id, **kwargs):
""" Sets the pid gain to the specified motors. """
pid_for_id = dict(itertools.izip(pid_for_id.iterkeys(),
[tuple(reversed(t)) for t in pid_for_id.values()]))
self._set_pid_gain(pid_for_id, **kwargs) | python | def set_pid_gain(self, pid_for_id, **kwargs):
pid_for_id = dict(itertools.izip(pid_for_id.iterkeys(),
[tuple(reversed(t)) for t in pid_for_id.values()]))
self._set_pid_gain(pid_for_id, **kwargs) | [
"def",
"set_pid_gain",
"(",
"self",
",",
"pid_for_id",
",",
"*",
"*",
"kwargs",
")",
":",
"pid_for_id",
"=",
"dict",
"(",
"itertools",
".",
"izip",
"(",
"pid_for_id",
".",
"iterkeys",
"(",
")",
",",
"[",
"tuple",
"(",
"reversed",
"(",
"t",
")",
")",
"for",
"t",
"in",
"pid_for_id",
".",
"values",
"(",
")",
"]",
")",
")",
"self",
".",
"_set_pid_gain",
"(",
"pid_for_id",
",",
"*",
"*",
"kwargs",
")"
]
| Sets the pid gain to the specified motors. | [
"Sets",
"the",
"pid",
"gain",
"to",
"the",
"specified",
"motors",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L310-L314 |
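The reversal in both PID accessors presumably bridges a user-facing (P, I, D) order and the reversed order of the underlying registers; note also that `itertools.izip` and `iterkeys()` are Python 2 only (`zip` and `keys()` on Python 3). The reordering in isolation:

```python
# Reorder (P, I, D) tuples per motor id; the values are illustrative.
pid_for_id = {1: (4.0, 0.5, 0.1)}
reordered = dict(zip(pid_for_id.keys(),
                     [tuple(reversed(t)) for t in pid_for_id.values()]))
assert reordered == {1: (0.1, 0.5, 4.0)}
```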
poppy-project/pypot | pypot/dynamixel/io/abstract_io.py | AbstractDxlIO.get_control_table | def get_control_table(self, ids, **kwargs):
""" Gets the full control table for the specified motors.
.. note:: This function requires the model for each motor to be known. Querying this additional information might add some extra delay.
"""
error_handler = kwargs['error_handler'] if ('error_handler' in kwargs) else self._error_handler
convert = kwargs['convert'] if ('convert' in kwargs) else self._convert
bl = ('goal position speed load', 'present position speed load')
controls = [c for c in self._AbstractDxlIO__controls if c.name not in bl]
res = []
for id, model in zip(ids, self.get_model(ids)):
controls = [c for c in controls if model in c.models]
controls = sorted(controls, key=lambda c: c.address)
address = controls[0].address
length = controls[-1].address + controls[-1].nb_elem * controls[-1].length
rp = self._protocol.DxlReadDataPacket(id, address, length)
sp = self._send_packet(rp, error_handler=error_handler)
d = OrderedDict()
for c in controls:
v = dxl_decode_all(sp.parameters[c.address:c.address + c.nb_elem * c.length], c.nb_elem)
d[c.name] = c.dxl_to_si(v, model) if convert else v
res.append(d)
return tuple(res) | python | def get_control_table(self, ids, **kwargs):
error_handler = kwargs['error_handler'] if ('error_handler' in kwargs) else self._error_handler
convert = kwargs['convert'] if ('convert' in kwargs) else self._convert
bl = ('goal position speed load', 'present position speed load')
controls = [c for c in self._AbstractDxlIO__controls if c.name not in bl]
res = []
for id, model in zip(ids, self.get_model(ids)):
controls = [c for c in controls if model in c.models]
controls = sorted(controls, key=lambda c: c.address)
address = controls[0].address
length = controls[-1].address + controls[-1].nb_elem * controls[-1].length
rp = self._protocol.DxlReadDataPacket(id, address, length)
sp = self._send_packet(rp, error_handler=error_handler)
d = OrderedDict()
for c in controls:
v = dxl_decode_all(sp.parameters[c.address:c.address + c.nb_elem * c.length], c.nb_elem)
d[c.name] = c.dxl_to_si(v, model) if convert else v
res.append(d)
return tuple(res) | [
"def",
"get_control_table",
"(",
"self",
",",
"ids",
",",
"*",
"*",
"kwargs",
")",
":",
"error_handler",
"=",
"kwargs",
"[",
"'error_handler'",
"]",
"if",
"(",
"'error_handler'",
"in",
"kwargs",
")",
"else",
"self",
".",
"_error_handler",
"convert",
"=",
"kwargs",
"[",
"'convert'",
"]",
"if",
"(",
"'convert'",
"in",
"kwargs",
")",
"else",
"self",
".",
"_convert",
"bl",
"=",
"(",
"'goal position speed load'",
",",
"'present position speed load'",
")",
"controls",
"=",
"[",
"c",
"for",
"c",
"in",
"self",
".",
"_AbstractDxlIO__controls",
"if",
"c",
".",
"name",
"not",
"in",
"bl",
"]",
"res",
"=",
"[",
"]",
"for",
"id",
",",
"model",
"in",
"zip",
"(",
"ids",
",",
"self",
".",
"get_model",
"(",
"ids",
")",
")",
":",
"controls",
"=",
"[",
"c",
"for",
"c",
"in",
"controls",
"if",
"model",
"in",
"c",
".",
"models",
"]",
"controls",
"=",
"sorted",
"(",
"controls",
",",
"key",
"=",
"lambda",
"c",
":",
"c",
".",
"address",
")",
"address",
"=",
"controls",
"[",
"0",
"]",
".",
"address",
"length",
"=",
"controls",
"[",
"-",
"1",
"]",
".",
"address",
"+",
"controls",
"[",
"-",
"1",
"]",
".",
"nb_elem",
"*",
"controls",
"[",
"-",
"1",
"]",
".",
"length",
"rp",
"=",
"self",
".",
"_protocol",
".",
"DxlReadDataPacket",
"(",
"id",
",",
"address",
",",
"length",
")",
"sp",
"=",
"self",
".",
"_send_packet",
"(",
"rp",
",",
"error_handler",
"=",
"error_handler",
")",
"d",
"=",
"OrderedDict",
"(",
")",
"for",
"c",
"in",
"controls",
":",
"v",
"=",
"dxl_decode_all",
"(",
"sp",
".",
"parameters",
"[",
"c",
".",
"address",
":",
"c",
".",
"address",
"+",
"c",
".",
"nb_elem",
"*",
"c",
".",
"length",
"]",
",",
"c",
".",
"nb_elem",
")",
"d",
"[",
"c",
".",
"name",
"]",
"=",
"c",
".",
"dxl_to_si",
"(",
"v",
",",
"model",
")",
"if",
"convert",
"else",
"v",
"res",
".",
"append",
"(",
"d",
")",
"return",
"tuple",
"(",
"res",
")"
]
| Gets the full control table for the specified motors.
.. note:: This function requires the model for each motor to be known. Querying this additional information might add some extra delay. | [
"Gets",
"the",
"full",
"control",
"table",
"for",
"the",
"specified",
"motors",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/abstract_io.py#L318-L350 |
poppy-project/pypot | pypot/utils/interpolation.py | KDTreeDict.nearest_keys | def nearest_keys(self, key):
"""Find the nearest_keys (l2 distance) thanks to a cKDTree query"""
if not isinstance(key, tuple):
_key = (key,)
if self.__stale:
self.generate_tree()
d, idx = self.__tree.query(
_key, self.k_neighbors, distance_upper_bound=self.distance_upper_bound)
try:
return [self.__keys[id][0] for id in idx if id < len(self.__keys)]
except TypeError:
# if k_neighbors = 1, the query does not return arrays
return self.__keys[idx] | python | def nearest_keys(self, key):
if not isinstance(key, tuple):
_key = (key,)
if self.__stale:
self.generate_tree()
d, idx = self.__tree.query(
_key, self.k_neighbors, distance_upper_bound=self.distance_upper_bound)
try:
return [self.__keys[id][0] for id in idx if id < len(self.__keys)]
except TypeError:
return self.__keys[idx] | [
"def",
"nearest_keys",
"(",
"self",
",",
"key",
")",
":",
"if",
"not",
"isinstance",
"(",
"key",
",",
"tuple",
")",
":",
"_key",
"=",
"(",
"key",
",",
")",
"if",
"self",
".",
"__stale",
":",
"self",
".",
"generate_tree",
"(",
")",
"d",
",",
"idx",
"=",
"self",
".",
"__tree",
".",
"query",
"(",
"_key",
",",
"self",
".",
"k_neighbors",
",",
"distance_upper_bound",
"=",
"self",
".",
"distance_upper_bound",
")",
"try",
":",
"return",
"[",
"self",
".",
"__keys",
"[",
"id",
"]",
"[",
"0",
"]",
"for",
"id",
"in",
"idx",
"if",
"id",
"<",
"len",
"(",
"self",
".",
"__keys",
")",
"]",
"except",
"TypeError",
":",
"# if k_neighbors = 1 query is not returnng arrays",
"return",
"self",
".",
"__keys",
"[",
"idx",
"]"
]
| Find the nearest keys (L2 distance) with a cKDTree query | [
"Find",
"the",
"nearest",
"keys",
"(",
"L2",
"distance",
")",
"with",
"a",
"cKDTree",
"query"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/utils/interpolation.py#L50-L63 |
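The query pattern above, standalone: scipy reports a missing neighbour by returning an index equal to the tree size (with an infinite distance), which is why the method filters on `id < len(self.__keys)`:

```python
# cKDTree query with a distance bound; out-of-range neighbours come
# back as index == len(keys) with distance inf.
import numpy as np
from scipy.spatial import cKDTree

keys = np.array([[0.0], [1.0], [2.5]])   # one 1-D key per row
tree = cKDTree(keys)
d, idx = tree.query((1.2,), k=2, distance_upper_bound=1.0)
in_range = [int(i) for i in idx if i < len(keys)]
print(d, idx, in_range)                  # [0.2 inf] [1 3] [1]
```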
poppy-project/pypot | pypot/utils/interpolation.py | KDTreeDict.interpolate_motor_positions | def interpolate_motor_positions(self, input_key, nearest_keys):
""" Process linear interpolation to estimate actual speed and position of motors
Method specific to the :meth:~pypot.primitive.move.Move.position() structure
it is a KDTreeDict[timestamp] = {dict[motor]=(position,speed)}
"""
# TODO : to be rewrited with more style (map ?)
if len(nearest_keys) == 1:
return self[nearest_keys[0]]
elif len(nearest_keys) == 0:
raise KeyError('key {} exceed distance_upper_bound {}'.format(
input_key, self.distance_upper_bound))
elif len(nearest_keys) != 2:
raise NotImplementedError("interpolation works only for k_neighbors = 2")
elif nearest_keys[0] == nearest_keys[1]:
# Bug from nearest key ?
return self[nearest_keys[0]]
# Guards against "ValueError: A value in x_new is above the interpolation range."
elif input_key < min(nearest_keys):
return self[min(nearest_keys)]
elif input_key > max(nearest_keys):
return self[max(nearest_keys)]
interpolated_positions = {}
for (k, v), (k2, v2) in zip(self[nearest_keys[0]].items(), self[nearest_keys[1]].items()):
if k == k2:
x = np.array(nearest_keys)
y_pos = np.array([v[0], v2[0]])
y_speed = np.array([v[1], v2[1]])
f_pos = interp1d(x, y_pos, bounds_error=False)
f_speed = interp1d(x, y_speed, bounds_error=False)
# print k, input_key, (float(f_pos(input_key[0])), float(f_speed(input_key[0])))
interpolated_positions[k] = (f_pos(input_key), f_speed(input_key))
else:
raise IndexError("key are not identics. Motor added during the record ?")
return interpolated_positions | python | def interpolate_motor_positions(self, input_key, nearest_keys):
if len(nearest_keys) == 1:
return self[nearest_keys[0]]
elif len(nearest_keys) == 0:
raise KeyError('key {} exceed distance_upper_bound {}'.format(
input_key, self.distance_upper_bound))
elif len(nearest_keys) != 2:
raise NotImplementedError("interpolation works only for k_neighbors = 2")
elif nearest_keys[0] == nearest_keys[1]:
return self[nearest_keys[0]]
elif input_key < min(nearest_keys):
return self[min(nearest_keys)]
elif input_key > max(nearest_keys):
return self[max(nearest_keys)]
interpolated_positions = {}
for (k, v), (k2, v2) in zip(self[nearest_keys[0]].items(), self[nearest_keys[1]].items()):
if k == k2:
x = np.array(nearest_keys)
y_pos = np.array([v[0], v2[0]])
y_speed = np.array([v[1], v2[1]])
f_pos = interp1d(x, y_pos, bounds_error=False)
f_speed = interp1d(x, y_speed, bounds_error=False)
interpolated_positions[k] = (f_pos(input_key), f_speed(input_key))
else:
raise IndexError("key are not identics. Motor added during the record ?")
return interpolated_positions | [
"def",
"interpolate_motor_positions",
"(",
"self",
",",
"input_key",
",",
"nearest_keys",
")",
":",
"# TODO : to be rewrited with more style (map ?)",
"if",
"len",
"(",
"nearest_keys",
")",
"==",
"1",
":",
"return",
"self",
"[",
"nearest_keys",
"[",
"0",
"]",
"]",
"elif",
"len",
"(",
"nearest_keys",
")",
"==",
"0",
":",
"raise",
"KeyError",
"(",
"'key {} exceed distance_upper_bound {}'",
".",
"format",
"(",
"input_key",
",",
"self",
".",
"distance_upper_bound",
")",
")",
"elif",
"len",
"(",
"nearest_keys",
")",
"!=",
"2",
":",
"raise",
"NotImplementedError",
"(",
"\"interpolation works only for k_neighbors = 2\"",
")",
"elif",
"nearest_keys",
"[",
"0",
"]",
"==",
"nearest_keys",
"[",
"1",
"]",
":",
"# Bug from nearest key ?",
"return",
"self",
"[",
"nearest_keys",
"[",
"0",
"]",
"]",
"# Problem if ValueError: A value in x_new is above the interpolation range.",
"elif",
"input_key",
"<",
"min",
"(",
"nearest_keys",
")",
":",
"return",
"self",
"[",
"min",
"(",
"nearest_keys",
")",
"]",
"elif",
"input_key",
">",
"max",
"(",
"nearest_keys",
")",
":",
"return",
"self",
"[",
"max",
"(",
"nearest_keys",
")",
"]",
"interpolated_positions",
"=",
"{",
"}",
"for",
"(",
"k",
",",
"v",
")",
",",
"(",
"k2",
",",
"v2",
")",
"in",
"zip",
"(",
"self",
"[",
"nearest_keys",
"[",
"0",
"]",
"]",
".",
"items",
"(",
")",
",",
"self",
"[",
"nearest_keys",
"[",
"1",
"]",
"]",
".",
"items",
"(",
")",
")",
":",
"if",
"k",
"==",
"k2",
":",
"x",
"=",
"np",
".",
"array",
"(",
"nearest_keys",
")",
"y_pos",
"=",
"np",
".",
"array",
"(",
"[",
"v",
"[",
"0",
"]",
",",
"v2",
"[",
"0",
"]",
"]",
")",
"y_speed",
"=",
"np",
".",
"array",
"(",
"[",
"v",
"[",
"1",
"]",
",",
"v2",
"[",
"1",
"]",
"]",
")",
"f_pos",
"=",
"interp1d",
"(",
"x",
",",
"y_pos",
",",
"bounds_error",
"=",
"False",
")",
"f_speed",
"=",
"interp1d",
"(",
"x",
",",
"y_speed",
",",
"bounds_error",
"=",
"False",
")",
"# print k, input_key, (float(f_pos(input_key[0])), float(f_speed(input_key[0])))",
"interpolated_positions",
"[",
"k",
"]",
"=",
"(",
"f_pos",
"(",
"input_key",
")",
",",
"f_speed",
"(",
"input_key",
")",
")",
"else",
":",
"raise",
"IndexError",
"(",
"\"key are not identics. Motor added during the record ?\"",
")",
"return",
"interpolated_positions"
]
| Process linear interpolation to estimate actual speed and position of motors
Method specific to the :meth:~pypot.primitive.move.Move.position() structure
it is a KDTreeDict[timestamp] = {dict[motor]=(position,speed)} | [
"Process",
"linear",
"interpolation",
"to",
"estimate",
"actual",
"speed",
"and",
"position",
"of",
"motors",
"Method",
"specific",
"to",
"the",
":",
"meth",
":",
"~pypot",
".",
"primitive",
".",
"move",
".",
"Move",
".",
"position",
"()",
"structure",
"it",
"is",
"a",
"KDTreeDict",
"[",
"timestamp",
"]",
"=",
"{",
"dict",
"[",
"motor",
"]",
"=",
"(",
"position",
"speed",
")",
"}"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/utils/interpolation.py#L65-L102 |
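The per-motor interpolation step above can be isolated; a minimal sketch with made-up timestamps and (position, speed) samples:

import numpy as np
from scipy.interpolate import interp1d

t0, t1 = 1.0, 2.0                    # two recorded timestamps
v0, v1 = (10.0, 5.0), (20.0, 15.0)   # (position, speed) at each timestamp

x = np.array([t0, t1])
f_pos = interp1d(x, np.array([v0[0], v1[0]]), bounds_error=False)
f_speed = interp1d(x, np.array([v0[1], v1[1]]), bounds_error=False)

print(float(f_pos(1.5)), float(f_speed(1.5)))  # 15.0 10.0 -- halfway between the samples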
poppy-project/pypot | pypot/utils/flushed_print.py | flushed_print | def flushed_print(*args, **kwargs):
"""
Use to replace print(*args, flush=True) that doesn't exist for python<3.3
"""
print(*args, **kwargs)
file = kwargs.get('file', sys.stdout)
file.flush() if file is not None else sys.stdout.flush() | python | def flushed_print(*args, **kwargs):
print(*args, **kwargs)
file = kwargs.get('file', sys.stdout)
file.flush() if file is not None else sys.stdout.flush() | [
"def",
"flushed_print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"file",
"=",
"kwargs",
".",
"get",
"(",
"'file'",
",",
"sys",
".",
"stdout",
")",
"file",
".",
"flush",
"(",
")",
"if",
"file",
"is",
"not",
"None",
"else",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
]
| Use to replace print(*args, flush=True) that doesn't exist for python<3.3 | [
"Use",
"to",
"replace",
"print",
"(",
"*",
"args",
"flush",
"=",
"True",
")",
"that",
"doesn",
"t",
"exist",
"for",
"python<3",
".",
"3"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/utils/flushed_print.py#L5-L11 |
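A usage sketch; the import path is inferred from the record above and the printed values are illustrative:

import sys
from pypot.utils.flushed_print import flushed_print  # module path assumed from the record

flushed_print('progress:', 42)             # flushed to sys.stdout immediately
flushed_print('warning', file=sys.stderr)  # the given stream is flushed instead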
poppy-project/pypot | pypot/robot/config.py | from_config | def from_config(config, strict=True, sync=True, use_dummy_io=False, **extra):
""" Returns a :class:`~pypot.robot.robot.Robot` instance created from a configuration dictionnary.
:param dict config: robot configuration dictionary
:param bool strict: make sure that all ports, motors are availaible.
:param bool sync: choose if automatically starts the synchronization loops
For details on how to write such a configuration dictionnary, you should refer to the section :ref:`config_file`.
"""
logger.info('Loading config... ', extra={'config': config})
alias = config['motorgroups']
# Instantiate the different motor controllers
controllers = []
for c_name, c_params in config['controllers'].items():
motor_names = sum([_motor_extractor(alias, name)
for name in c_params['attached_motors']], [])
attached_motors = [motor_from_confignode(config, name)
for name in motor_names]
# at least one of the motors is set as broken
if [m for m in attached_motors if m._broken]:
strict = False
attached_ids = [m.id for m in attached_motors]
if not use_dummy_io:
dxl_io = dxl_io_from_confignode(config, c_params, attached_ids, strict)
check_motor_eprom_configuration(config, dxl_io, motor_names)
logger.info('Instantiating controller on %s with motors %s',
dxl_io.port, motor_names,
extra={'config': config})
syncloop = (c_params['syncloop'] if 'syncloop' in c_params
else 'BaseDxlController')
SyncLoopCls = getattr(pypot.dynamixel.syncloop, syncloop)
c = SyncLoopCls(dxl_io, attached_motors)
controllers.append(c)
else:
controllers.append(DummyController(attached_motors))
try:
robot = Robot(motor_controllers=controllers, sync=sync)
except RuntimeError:
for c in controllers:
c.io.close()
raise
make_alias(config, robot)
# Create all sensors and attach them
try:
if 'sensors' in config and not use_dummy_io:
sensors = []
for s_name in config['sensors'].keys():
if s_name in extra and extra[s_name] == 'dummy':
config['sensors'][s_name]['type'] = 'Dummy{}'.format(s_name.capitalize())
sensor = sensor_from_confignode(config, s_name, robot)
setattr(robot, s_name, sensor)
sensors.append(sensor)
robot.sensors.append(sensor)
[s.start() for s in sensors if hasattr(s, 'start')]
# If anything goes wrong when adding sensors
# We have to make sure we close the robot properly
# Otherwise trying to open it again will fail.
except Exception:
robot.close()
raise
logger.info('Loading complete!',
extra={'config': config})
return robot | python | def from_config(config, strict=True, sync=True, use_dummy_io=False, **extra):
logger.info('Loading config... ', extra={'config': config})
alias = config['motorgroups']
controllers = []
for c_name, c_params in config['controllers'].items():
motor_names = sum([_motor_extractor(alias, name)
for name in c_params['attached_motors']], [])
attached_motors = [motor_from_confignode(config, name)
for name in motor_names]
if [m for m in attached_motors if m._broken]:
strict = False
attached_ids = [m.id for m in attached_motors]
if not use_dummy_io:
dxl_io = dxl_io_from_confignode(config, c_params, attached_ids, strict)
check_motor_eprom_configuration(config, dxl_io, motor_names)
logger.info('Instantiating controller on %s with motors %s',
dxl_io.port, motor_names,
extra={'config': config})
syncloop = (c_params['syncloop'] if 'syncloop' in c_params
else 'BaseDxlController')
SyncLoopCls = getattr(pypot.dynamixel.syncloop, syncloop)
c = SyncLoopCls(dxl_io, attached_motors)
controllers.append(c)
else:
controllers.append(DummyController(attached_motors))
try:
robot = Robot(motor_controllers=controllers, sync=sync)
except RuntimeError:
for c in controllers:
c.io.close()
raise
make_alias(config, robot)
try:
if 'sensors' in config and not use_dummy_io:
sensors = []
for s_name in config['sensors'].keys():
if s_name in extra and extra[s_name] == 'dummy':
config['sensors'][s_name]['type'] = 'Dummy{}'.format(s_name.capitalize())
sensor = sensor_from_confignode(config, s_name, robot)
setattr(robot, s_name, sensor)
sensors.append(sensor)
robot.sensors.append(sensor)
[s.start() for s in sensors if hasattr(s, 'start')]
except Exception:
robot.close()
raise
logger.info('Loading complete!',
extra={'config': config})
return robot | [
"def",
"from_config",
"(",
"config",
",",
"strict",
"=",
"True",
",",
"sync",
"=",
"True",
",",
"use_dummy_io",
"=",
"False",
",",
"*",
"*",
"extra",
")",
":",
"logger",
".",
"info",
"(",
"'Loading config... '",
",",
"extra",
"=",
"{",
"'config'",
":",
"config",
"}",
")",
"alias",
"=",
"config",
"[",
"'motorgroups'",
"]",
"# Instatiate the different motor controllers",
"controllers",
"=",
"[",
"]",
"for",
"c_name",
",",
"c_params",
"in",
"config",
"[",
"'controllers'",
"]",
".",
"items",
"(",
")",
":",
"motor_names",
"=",
"sum",
"(",
"[",
"_motor_extractor",
"(",
"alias",
",",
"name",
")",
"for",
"name",
"in",
"c_params",
"[",
"'attached_motors'",
"]",
"]",
",",
"[",
"]",
")",
"attached_motors",
"=",
"[",
"motor_from_confignode",
"(",
"config",
",",
"name",
")",
"for",
"name",
"in",
"motor_names",
"]",
"# at least one of the motor is set as broken",
"if",
"[",
"m",
"for",
"m",
"in",
"attached_motors",
"if",
"m",
".",
"_broken",
"]",
":",
"strict",
"=",
"False",
"attached_ids",
"=",
"[",
"m",
".",
"id",
"for",
"m",
"in",
"attached_motors",
"]",
"if",
"not",
"use_dummy_io",
":",
"dxl_io",
"=",
"dxl_io_from_confignode",
"(",
"config",
",",
"c_params",
",",
"attached_ids",
",",
"strict",
")",
"check_motor_eprom_configuration",
"(",
"config",
",",
"dxl_io",
",",
"motor_names",
")",
"logger",
".",
"info",
"(",
"'Instantiating controller on %s with motors %s'",
",",
"dxl_io",
".",
"port",
",",
"motor_names",
",",
"extra",
"=",
"{",
"'config'",
":",
"config",
"}",
")",
"syncloop",
"=",
"(",
"c_params",
"[",
"'syncloop'",
"]",
"if",
"'syncloop'",
"in",
"c_params",
"else",
"'BaseDxlController'",
")",
"SyncLoopCls",
"=",
"getattr",
"(",
"pypot",
".",
"dynamixel",
".",
"syncloop",
",",
"syncloop",
")",
"c",
"=",
"SyncLoopCls",
"(",
"dxl_io",
",",
"attached_motors",
")",
"controllers",
".",
"append",
"(",
"c",
")",
"else",
":",
"controllers",
".",
"append",
"(",
"DummyController",
"(",
"attached_motors",
")",
")",
"try",
":",
"robot",
"=",
"Robot",
"(",
"motor_controllers",
"=",
"controllers",
",",
"sync",
"=",
"sync",
")",
"except",
"RuntimeError",
":",
"for",
"c",
"in",
"controllers",
":",
"c",
".",
"io",
".",
"close",
"(",
")",
"raise",
"make_alias",
"(",
"config",
",",
"robot",
")",
"# Create all sensors and attached them",
"try",
":",
"if",
"'sensors'",
"in",
"config",
"and",
"not",
"use_dummy_io",
":",
"sensors",
"=",
"[",
"]",
"for",
"s_name",
"in",
"config",
"[",
"'sensors'",
"]",
".",
"keys",
"(",
")",
":",
"if",
"s_name",
"in",
"extra",
"and",
"extra",
"[",
"s_name",
"]",
"==",
"'dummy'",
":",
"config",
"[",
"'sensors'",
"]",
"[",
"s_name",
"]",
"[",
"'type'",
"]",
"=",
"'Dummy{}'",
".",
"format",
"(",
"s_name",
".",
"capitalize",
"(",
")",
")",
"sensor",
"=",
"sensor_from_confignode",
"(",
"config",
",",
"s_name",
",",
"robot",
")",
"setattr",
"(",
"robot",
",",
"s_name",
",",
"sensor",
")",
"sensors",
".",
"append",
"(",
"sensor",
")",
"robot",
".",
"sensors",
".",
"append",
"(",
"sensor",
")",
"[",
"s",
".",
"start",
"(",
")",
"for",
"s",
"in",
"sensors",
"if",
"hasattr",
"(",
"s",
",",
"'start'",
")",
"]",
"# If anything goes wrong when adding sensors",
"# We have to make sure we close the robot properly",
"# Otherwise trying to open it again will fail.",
"except",
"Exception",
":",
"robot",
".",
"close",
"(",
")",
"raise",
"logger",
".",
"info",
"(",
"'Loading complete!'",
",",
"extra",
"=",
"{",
"'config'",
":",
"config",
"}",
")",
"return",
"robot"
]
| Returns a :class:`~pypot.robot.robot.Robot` instance created from a configuration dictionary.
:param dict config: robot configuration dictionary
:param bool strict: make sure that all ports, motors are available.
:param bool sync: choose whether to automatically start the synchronization loops
For details on how to write such a configuration dictionary, you should refer to the section :ref:`config_file`. | [
"Returns",
"a",
":",
"class",
":",
"~pypot",
".",
"robot",
".",
"robot",
".",
"Robot",
"instance",
"created",
"from",
"a",
"configuration",
"dictionnary",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/robot/config.py#L33-L114 |
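A hedged sketch of the configuration dictionary shape that from_config reads ('controllers', 'motorgroups', 'motors', optional 'sensors'); all values are illustrative and a real pypot config needs more per-motor fields:

config = {
    'controllers': {
        'my_dxl_controller': {
            'port': '/dev/ttyUSB0',           # illustrative serial port
            'attached_motors': ['base'],      # motor or motor-group names
            'syncloop': 'BaseDxlController',  # optional; this is the default used above
        },
    },
    'motorgroups': {'base': ['motor_1']},
    'motors': {
        'motor_1': {'id': 1, 'angle_limit': [-90.0, 90.0]},  # subset of the fields read above
    },
}
# robot = from_config(config)  # would open the serial port, so left commented out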
poppy-project/pypot | pypot/robot/config.py | check_motor_eprom_configuration | def check_motor_eprom_configuration(config, dxl_io, motor_names):
""" Change the angles limits depanding on the robot configuration ;
Check if the return delay time is set to 0.
"""
changed_angle_limits = {}
changed_return_delay_time = {}
for name in motor_names:
m = config['motors'][name]
id = m['id']
try:
old_limits = dxl_io.get_angle_limit((id, ))[0]
old_return_delay_time = dxl_io.get_return_delay_time((id, ))[0]
except IndexError: # probably a broken motor so we just skip
continue
if old_return_delay_time != 0:
logger.warning("Return delay time of %s changed from %s to 0",
name, old_return_delay_time)
changed_return_delay_time[id] = 0
new_limits = m['angle_limit']
if 'wheel_mode' in m and m['wheel_mode']:
dxl_io.set_wheel_mode([m['id']])
time.sleep(0.5)
else:
# TODO: we probably need a better fix for this.
# dxl_io.set_joint_mode([m['id']])
d = numpy.linalg.norm(numpy.asarray(new_limits) - numpy.asarray(old_limits))
if d > 1:
logger.warning("Limits of '%s' changed from %s to %s",
name, old_limits, new_limits,
extra={'config': config})
changed_angle_limits[id] = new_limits
if changed_angle_limits:
dxl_io.set_angle_limit(changed_angle_limits)
time.sleep(0.5)
if changed_return_delay_time:
dxl_io.set_return_delay_time(changed_return_delay_time)
time.sleep(0.5) | python | def check_motor_eprom_configuration(config, dxl_io, motor_names):
changed_angle_limits = {}
changed_return_delay_time = {}
for name in motor_names:
m = config['motors'][name]
id = m['id']
try:
old_limits = dxl_io.get_angle_limit((id, ))[0]
old_return_delay_time = dxl_io.get_return_delay_time((id, ))[0]
except IndexError:
continue
if old_return_delay_time != 0:
logger.warning("Return delay time of %s changed from %s to 0",
name, old_return_delay_time)
changed_return_delay_time[id] = 0
new_limits = m['angle_limit']
if 'wheel_mode' in m and m['wheel_mode']:
dxl_io.set_wheel_mode([m['id']])
time.sleep(0.5)
else:
d = numpy.linalg.norm(numpy.asarray(new_limits) - numpy.asarray(old_limits))
if d > 1:
logger.warning("Limits of '%s' changed from %s to %s",
name, old_limits, new_limits,
extra={'config': config})
changed_angle_limits[id] = new_limits
if changed_angle_limits:
dxl_io.set_angle_limit(changed_angle_limits)
time.sleep(0.5)
if changed_return_delay_time:
dxl_io.set_return_delay_time(changed_return_delay_time)
time.sleep(0.5) | [
"def",
"check_motor_eprom_configuration",
"(",
"config",
",",
"dxl_io",
",",
"motor_names",
")",
":",
"changed_angle_limits",
"=",
"{",
"}",
"changed_return_delay_time",
"=",
"{",
"}",
"for",
"name",
"in",
"motor_names",
":",
"m",
"=",
"config",
"[",
"'motors'",
"]",
"[",
"name",
"]",
"id",
"=",
"m",
"[",
"'id'",
"]",
"try",
":",
"old_limits",
"=",
"dxl_io",
".",
"get_angle_limit",
"(",
"(",
"id",
",",
")",
")",
"[",
"0",
"]",
"old_return_delay_time",
"=",
"dxl_io",
".",
"get_return_delay_time",
"(",
"(",
"id",
",",
")",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"# probably a broken motor so we just skip",
"continue",
"if",
"old_return_delay_time",
"!=",
"0",
":",
"logger",
".",
"warning",
"(",
"\"Return delay time of %s changed from %s to 0\"",
",",
"name",
",",
"old_return_delay_time",
")",
"changed_return_delay_time",
"[",
"id",
"]",
"=",
"0",
"new_limits",
"=",
"m",
"[",
"'angle_limit'",
"]",
"if",
"'wheel_mode'",
"in",
"m",
"and",
"m",
"[",
"'wheel_mode'",
"]",
":",
"dxl_io",
".",
"set_wheel_mode",
"(",
"[",
"m",
"[",
"'id'",
"]",
"]",
")",
"time",
".",
"sleep",
"(",
"0.5",
")",
"else",
":",
"# TODO: we probably need a better fix for this.",
"# dxl_io.set_joint_mode([m['id']])",
"d",
"=",
"numpy",
".",
"linalg",
".",
"norm",
"(",
"numpy",
".",
"asarray",
"(",
"new_limits",
")",
"-",
"numpy",
".",
"asarray",
"(",
"old_limits",
")",
")",
"if",
"d",
">",
"1",
":",
"logger",
".",
"warning",
"(",
"\"Limits of '%s' changed from %s to %s\"",
",",
"name",
",",
"old_limits",
",",
"new_limits",
",",
"extra",
"=",
"{",
"'config'",
":",
"config",
"}",
")",
"changed_angle_limits",
"[",
"id",
"]",
"=",
"new_limits",
"if",
"changed_angle_limits",
":",
"dxl_io",
".",
"set_angle_limit",
"(",
"changed_angle_limits",
")",
"time",
".",
"sleep",
"(",
"0.5",
")",
"if",
"changed_return_delay_time",
":",
"dxl_io",
".",
"set_return_delay_time",
"(",
"changed_return_delay_time",
")",
"time",
".",
"sleep",
"(",
"0.5",
")"
]
| Change the angle limits depending on the robot configuration;
Check if the return delay time is set to 0. | [
"Change",
"the",
"angles",
"limits",
"depanding",
"on",
"the",
"robot",
"configuration",
";",
"Check",
"if",
"the",
"return",
"delay",
"time",
"is",
"set",
"to",
"0",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/robot/config.py#L209-L252 |
poppy-project/pypot | pypot/robot/config.py | from_json | def from_json(json_file, sync=True, strict=True, use_dummy_io=False, **extra):
""" Returns a :class:`~pypot.robot.robot.Robot` instance created from a JSON configuration file.
For details on how to write such a configuration file, you should refer to the section :ref:`config_file`.
"""
with open(json_file) as f:
config = json.load(f, object_pairs_hook=OrderedDict)
return from_config(config, sync=sync, strict=strict, use_dummy_io=use_dummy_io, **extra) | python | def from_json(json_file, sync=True, strict=True, use_dummy_io=False, **extra):
with open(json_file) as f:
config = json.load(f, object_pairs_hook=OrderedDict)
return from_config(config, sync=sync, strict=strict, use_dummy_io=use_dummy_io, **extra) | [
"def",
"from_json",
"(",
"json_file",
",",
"sync",
"=",
"True",
",",
"strict",
"=",
"True",
",",
"use_dummy_io",
"=",
"False",
",",
"*",
"*",
"extra",
")",
":",
"with",
"open",
"(",
"json_file",
")",
"as",
"f",
":",
"config",
"=",
"json",
".",
"load",
"(",
"f",
",",
"object_pairs_hook",
"=",
"OrderedDict",
")",
"return",
"from_config",
"(",
"config",
",",
"sync",
"=",
"sync",
",",
"strict",
"=",
"strict",
",",
"use_dummy_io",
"=",
"use_dummy_io",
",",
"*",
"*",
"extra",
")"
]
| Returns a :class:`~pypot.robot.robot.Robot` instance created from a JSON configuration file.
For details on how to write such a configuration file, you should refer to the section :ref:`config_file`. | [
"Returns",
"a",
":",
"class",
":",
"~pypot",
".",
"robot",
".",
"robot",
".",
"Robot",
"instance",
"created",
"from",
"a",
"JSON",
"configuration",
"file",
"."
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/robot/config.py#L295-L304 |
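The JSON loading step on its own; note object_pairs_hook, which preserves key order exactly as above (file name illustrative):

import json
from collections import OrderedDict

with open('my_robot.json') as f:
    config = json.load(f, object_pairs_hook=OrderedDict)
# robot = from_json('my_robot.json', use_dummy_io=True)  # dummy IO, no hardware needed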
poppy-project/pypot | pypot/server/rest.py | RESTRobot.stop_move_recorder | def stop_move_recorder(self, move_name):
"""Allow more easily than stop_primitive() to save in a filename the recorded move"""
recorder = getattr(self.robot, '_{}_recorder'.format(move_name))
recorder.stop()
with open('{}.record'.format(move_name), 'w') as f:
recorder.move.save(f)
# Stop player if running : to discuss
# Recording a playing move can produce strange outputs, but could be a good feature
try:
player = getattr(self.robot, '_{}_player'.format(move_name))
if player.running:
player.stop()
except AttributeError:
pass | python | def stop_move_recorder(self, move_name):
recorder = getattr(self.robot, '_{}_recorder'.format(move_name))
recorder.stop()
with open('{}.record'.format(move_name), 'w') as f:
recorder.move.save(f)
try:
player = getattr(self.robot, '_{}_player'.format(move_name))
if player.running:
player.stop()
except AttributeError:
pass | [
"def",
"stop_move_recorder",
"(",
"self",
",",
"move_name",
")",
":",
"recorder",
"=",
"getattr",
"(",
"self",
".",
"robot",
",",
"'_{}_recorder'",
".",
"format",
"(",
"move_name",
")",
")",
"recorder",
".",
"stop",
"(",
")",
"with",
"open",
"(",
"'{}.record'",
".",
"format",
"(",
"move_name",
")",
",",
"'w'",
")",
"as",
"f",
":",
"recorder",
".",
"move",
".",
"save",
"(",
"f",
")",
"# Stop player if running : to discuss",
"# Recording a playing move can produce strange outputs, but could be a good feature",
"try",
":",
"player",
"=",
"getattr",
"(",
"self",
".",
"robot",
",",
"'_{}_player'",
".",
"format",
"(",
"move_name",
")",
")",
"if",
"player",
".",
"running",
":",
"player",
".",
"stop",
"(",
")",
"except",
"AttributeError",
":",
"pass"
]
| Allows saving the recorded move to a file more easily than stop_primitive() | [
"Allow",
"more",
"easily",
"than",
"stop_primitive",
"()",
"to",
"save",
"in",
"a",
"filename",
"the",
"recorded",
"move"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/server/rest.py#L147-L161 |
poppy-project/pypot | pypot/server/rest.py | RESTRobot.start_move_player | def start_move_player(self, move_name, speed=1.0, backwards=False):
"""Move player need to have a move file
<move_name.record> in the working directory to play it"""
# check if running
try:
player = getattr(self.robot, '_{}_player'.format(move_name))
if player.running:
return
except AttributeError:
pass
# if not running, override the play primitive
with open('{}.record'.format(move_name)) as f:
loaded_move = Move.load(f)
player = MovePlayer(self.robot, loaded_move, play_speed=speed, backwards=backwards)
self.robot.attach_primitive(player, '_{}_player'.format(move_name))
player.start()
return player.duration() | python | def start_move_player(self, move_name, speed=1.0, backwards=False):
try:
player = getattr(self.robot, '_{}_player'.format(move_name))
if player.running:
return
except AttributeError:
pass
with open('{}.record'.format(move_name)) as f:
loaded_move = Move.load(f)
player = MovePlayer(self.robot, loaded_move, play_speed=speed, backwards=backwards)
self.robot.attach_primitive(player, '_{}_player'.format(move_name))
player.start()
return player.duration() | [
"def",
"start_move_player",
"(",
"self",
",",
"move_name",
",",
"speed",
"=",
"1.0",
",",
"backwards",
"=",
"False",
")",
":",
"# check if running",
"try",
":",
"player",
"=",
"getattr",
"(",
"self",
".",
"robot",
",",
"'_{}_player'",
".",
"format",
"(",
"move_name",
")",
")",
"if",
"player",
".",
"running",
":",
"return",
"except",
"AttributeError",
":",
"pass",
"# if not running, override the play primitive",
"with",
"open",
"(",
"'{}.record'",
".",
"format",
"(",
"move_name",
")",
")",
"as",
"f",
":",
"loaded_move",
"=",
"Move",
".",
"load",
"(",
"f",
")",
"player",
"=",
"MovePlayer",
"(",
"self",
".",
"robot",
",",
"loaded_move",
",",
"play_speed",
"=",
"speed",
",",
"backwards",
"=",
"backwards",
")",
"self",
".",
"robot",
".",
"attach_primitive",
"(",
"player",
",",
"'_{}_player'",
".",
"format",
"(",
"move_name",
")",
")",
"player",
".",
"start",
"(",
")",
"return",
"player",
".",
"duration",
"(",
")"
]
| Move player needs to have a move file
<move_name.record> in the working directory to play it | [
"Move",
"player",
"need",
"to",
"have",
"a",
"move",
"file",
"<move_name",
".",
"record",
">",
"in",
"the",
"working",
"directory",
"to",
"play",
"it"
]
| train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/server/rest.py#L163-L182 |
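A hedged sketch of playing a recorded move directly with MovePlayer, as the method above does internally; the module path is assumed from the references above, and the file name and robot instance are illustrative:

from pypot.primitive.move import Move, MovePlayer  # import path assumed

with open('wave.record') as f:  # file produced earlier by stop_move_recorder('wave')
    move = Move.load(f)
player = MovePlayer(robot, move, play_speed=0.5, backwards=True)  # robot: existing Robot
player.start()
print(player.duration())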
icometrix/dicom2nifti | dicom2nifti/compressed_dicom.py | _get_gdcmconv | def _get_gdcmconv():
"""
Get the full path to gdcmconv.
If not found raise error
"""
gdcmconv_executable = settings.gdcmconv_path
if gdcmconv_executable is None:
gdcmconv_executable = _which('gdcmconv')
if gdcmconv_executable is None:
gdcmconv_executable = _which('gdcmconv.exe')
if gdcmconv_executable is None:
raise ConversionError('GDCMCONV_NOT_FOUND')
return gdcmconv_executable | python | def _get_gdcmconv():
gdcmconv_executable = settings.gdcmconv_path
if gdcmconv_executable is None:
gdcmconv_executable = _which('gdcmconv')
if gdcmconv_executable is None:
gdcmconv_executable = _which('gdcmconv.exe')
if gdcmconv_executable is None:
raise ConversionError('GDCMCONV_NOT_FOUND')
return gdcmconv_executable | [
"def",
"_get_gdcmconv",
"(",
")",
":",
"gdcmconv_executable",
"=",
"settings",
".",
"gdcmconv_path",
"if",
"gdcmconv_executable",
"is",
"None",
":",
"gdcmconv_executable",
"=",
"_which",
"(",
"'gdcmconv'",
")",
"if",
"gdcmconv_executable",
"is",
"None",
":",
"gdcmconv_executable",
"=",
"_which",
"(",
"'gdcmconv.exe'",
")",
"if",
"gdcmconv_executable",
"is",
"None",
":",
"raise",
"ConversionError",
"(",
"'GDCMCONV_NOT_FOUND'",
")",
"return",
"gdcmconv_executable"
]
| Get the full path to gdcmconv.
If not found raise error | [
"Get",
"the",
"full",
"path",
"to",
"gdcmconv",
".",
"If",
"not",
"found",
"raise",
"error"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/compressed_dicom.py#L41-L55 |
icometrix/dicom2nifti | dicom2nifti/compressed_dicom.py | compress_directory | def compress_directory(dicom_directory):
"""
This function can be used to convert a folder of jpeg compressed images to uncompressed ones
:param dicom_directory: directory of dicom files to compress
"""
if _is_compressed(dicom_directory):
return
logger.info('Compressing dicom files in %s' % dicom_directory)
for root, _, files in os.walk(dicom_directory):
for dicom_file in files:
if is_dicom_file(os.path.join(root, dicom_file)):
_compress_dicom(os.path.join(root, dicom_file)) | python | def compress_directory(dicom_directory):
if _is_compressed(dicom_directory):
return
logger.info('Compressing dicom files in %s' % dicom_directory)
for root, _, files in os.walk(dicom_directory):
for dicom_file in files:
if is_dicom_file(os.path.join(root, dicom_file)):
_compress_dicom(os.path.join(root, dicom_file)) | [
"def",
"compress_directory",
"(",
"dicom_directory",
")",
":",
"if",
"_is_compressed",
"(",
"dicom_directory",
")",
":",
"return",
"logger",
".",
"info",
"(",
"'Compressing dicom files in %s'",
"%",
"dicom_directory",
")",
"for",
"root",
",",
"_",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"dicom_directory",
")",
":",
"for",
"dicom_file",
"in",
"files",
":",
"if",
"is_dicom_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"dicom_file",
")",
")",
":",
"_compress_dicom",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"dicom_file",
")",
")"
]
| This function can be used to convert a folder of jpeg compressed images to uncompressed ones
:param dicom_directory: directory of dicom files to compress | [
"This",
"function",
"can",
"be",
"used",
"to",
"convert",
"a",
"folder",
"of",
"jpeg",
"compressed",
"images",
"to",
"an",
"uncompressed",
"ones"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/compressed_dicom.py#L58-L71 |
icometrix/dicom2nifti | dicom2nifti/compressed_dicom.py | is_dicom_file | def is_dicom_file(filename):
"""
Util function to check if file is a dicom file
the first 128 bytes are preamble
the next 4 bytes should contain DICM otherwise it is not a dicom
:param filename: file to check for the DICM header block
:type filename: six.string_types
:returns: True if it is a dicom file
"""
file_stream = open(filename, 'rb')
file_stream.seek(128)
data = file_stream.read(4)
file_stream.close()
if data == b'DICM':
return True
if settings.pydicom_read_force:
try:
dicom_headers = pydicom.read_file(filename, defer_size="1 KB", stop_before_pixels=True, force=True)
if dicom_headers is not None:
return True
except:
pass
return False | python | def is_dicom_file(filename):
file_stream = open(filename, 'rb')
file_stream.seek(128)
data = file_stream.read(4)
file_stream.close()
if data == b'DICM':
return True
if settings.pydicom_read_force:
try:
dicom_headers = pydicom.read_file(filename, defer_size="1 KB", stop_before_pixels=True, force=True)
if dicom_headers is not None:
return True
except:
pass
return False | [
"def",
"is_dicom_file",
"(",
"filename",
")",
":",
"file_stream",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
"file_stream",
".",
"seek",
"(",
"128",
")",
"data",
"=",
"file_stream",
".",
"read",
"(",
"4",
")",
"file_stream",
".",
"close",
"(",
")",
"if",
"data",
"==",
"b'DICM'",
":",
"return",
"True",
"if",
"settings",
".",
"pydicom_read_force",
":",
"try",
":",
"dicom_headers",
"=",
"pydicom",
".",
"read_file",
"(",
"filename",
",",
"defer_size",
"=",
"\"1 KB\"",
",",
"stop_before_pixels",
"=",
"True",
",",
"force",
"=",
"True",
")",
"if",
"dicom_headers",
"is",
"not",
"None",
":",
"return",
"True",
"except",
":",
"pass",
"return",
"False"
]
| Util function to check if file is a dicom file
the first 128 bytes are preamble
the next 4 bytes should contain DICM otherwise it is not a dicom
:param filename: file to check for the DICM header block
:type filename: six.string_types
:returns: True if it is a dicom file | [
"Util",
"function",
"to",
"check",
"if",
"file",
"is",
"a",
"dicom",
"file",
"the",
"first",
"128",
"bytes",
"are",
"preamble",
"the",
"next",
"4",
"bytes",
"should",
"contain",
"DICM",
"otherwise",
"it",
"is",
"not",
"a",
"dicom"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/compressed_dicom.py#L74-L97 |
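The 128-byte preamble plus 'DICM' check is easy to exercise by hand; a minimal sketch, assuming is_dicom_file is importable from dicom2nifti.compressed_dicom as in the record:

import os
import tempfile
from dicom2nifti.compressed_dicom import is_dicom_file  # import path assumed

with tempfile.NamedTemporaryFile(suffix='.dcm', delete=False) as f:
    f.write(b'\x00' * 128 + b'DICM')  # 128-byte preamble, then the magic bytes
    fake = f.name

print(is_dicom_file(fake))  # True -- only the header block is inspected
os.remove(fake)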
icometrix/dicom2nifti | dicom2nifti/compressed_dicom.py | _is_compressed | def _is_compressed(dicom_file, force=False):
"""
Check if dicoms are compressed or not
"""
header = pydicom.read_file(dicom_file,
defer_size="1 KB",
stop_before_pixels=True,
force=force)
uncompressed_types = ["1.2.840.10008.1.2",
"1.2.840.10008.1.2.1",
"1.2.840.10008.1.2.1.99",
"1.2.840.10008.1.2.2"]
if 'TransferSyntaxUID' in header.file_meta and header.file_meta.TransferSyntaxUID in uncompressed_types:
return False
return True | python | def _is_compressed(dicom_file, force=False):
header = pydicom.read_file(dicom_file,
defer_size="1 KB",
stop_before_pixels=True,
force=force)
uncompressed_types = ["1.2.840.10008.1.2",
"1.2.840.10008.1.2.1",
"1.2.840.10008.1.2.1.99",
"1.2.840.10008.1.2.2"]
if 'TransferSyntaxUID' in header.file_meta and header.file_meta.TransferSyntaxUID in uncompressed_types:
return False
return True | [
"def",
"_is_compressed",
"(",
"dicom_file",
",",
"force",
"=",
"False",
")",
":",
"header",
"=",
"pydicom",
".",
"read_file",
"(",
"dicom_file",
",",
"defer_size",
"=",
"\"1 KB\"",
",",
"stop_before_pixels",
"=",
"True",
",",
"force",
"=",
"force",
")",
"uncompressed_types",
"=",
"[",
"\"1.2.840.10008.1.2\"",
",",
"\"1.2.840.10008.1.2.1\"",
",",
"\"1.2.840.10008.1.2.1.99\"",
",",
"\"1.2.840.10008.1.2.2\"",
"]",
"if",
"'TransferSyntaxUID'",
"in",
"header",
".",
"file_meta",
"and",
"header",
".",
"file_meta",
".",
"TransferSyntaxUID",
"in",
"uncompressed_types",
":",
"return",
"False",
"return",
"True"
]
| Check if dicoms are compressed or not | [
"Check",
"if",
"dicoms",
"are",
"compressed",
"or",
"not"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/compressed_dicom.py#L100-L116 |
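For reference, the four whitelisted UIDs above are the standard uncompressed DICOM transfer syntaxes:

UNCOMPRESSED_TRANSFER_SYNTAXES = {
    '1.2.840.10008.1.2': 'Implicit VR Little Endian',
    '1.2.840.10008.1.2.1': 'Explicit VR Little Endian',
    '1.2.840.10008.1.2.1.99': 'Deflated Explicit VR Little Endian',
    '1.2.840.10008.1.2.2': 'Explicit VR Big Endian',
}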
icometrix/dicom2nifti | dicom2nifti/compressed_dicom.py | _decompress_dicom | def _decompress_dicom(dicom_file, output_file):
"""
This function can be used to convert a jpeg compressed image to an uncompressed one for further conversion
:param input_file: single dicom file to decompress
"""
gdcmconv_executable = _get_gdcmconv()
subprocess.check_output([gdcmconv_executable, '-w', dicom_file, output_file]) | python | def _decompress_dicom(dicom_file, output_file):
gdcmconv_executable = _get_gdcmconv()
subprocess.check_output([gdcmconv_executable, '-w', dicom_file, output_file]) | [
"def",
"_decompress_dicom",
"(",
"dicom_file",
",",
"output_file",
")",
":",
"gdcmconv_executable",
"=",
"_get_gdcmconv",
"(",
")",
"subprocess",
".",
"check_output",
"(",
"[",
"gdcmconv_executable",
",",
"'-w'",
",",
"dicom_file",
",",
"output_file",
"]",
")"
]
| This function can be used to convert a jpeg compressed image to an uncompressed one for further conversion
:param input_file: single dicom file to decompress | [
"This",
"function",
"can",
"be",
"used",
"to",
"convert",
"a",
"jpeg",
"compressed",
"image",
"to",
"an",
"uncompressed",
"one",
"for",
"further",
"conversion"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/compressed_dicom.py#L119-L127 |
icometrix/dicom2nifti | scripts/dicomdiff.py | dicom_diff | def dicom_diff(file1, file2):
""" Shows the fields that differ between two DICOM images.
Inspired by https://code.google.com/p/pydicom/source/browse/source/dicom/examples/DicomDiff.py
"""
datasets = compressed_dicom.read_file(file1), compressed_dicom.read_file(file2)
rep = []
for dataset in datasets:
lines = (str(dataset.file_meta)+"\n"+str(dataset)).split('\n')
lines = [line + '\n' for line in lines] # add the newline to the end
rep.append(lines)
diff = difflib.Differ()
for line in diff.compare(rep[0], rep[1]):
if (line[0] == '+') or (line[0] == '-'):
sys.stdout.write(line) | python | def dicom_diff(file1, file2):
datasets = compressed_dicom.read_file(file1), compressed_dicom.read_file(file2)
rep = []
for dataset in datasets:
lines = (str(dataset.file_meta)+"\n"+str(dataset)).split('\n')
lines = [line + '\n' for line in lines]
rep.append(lines)
diff = difflib.Differ()
for line in diff.compare(rep[0], rep[1]):
if (line[0] == '+') or (line[0] == '-'):
sys.stdout.write(line) | [
"def",
"dicom_diff",
"(",
"file1",
",",
"file2",
")",
":",
"datasets",
"=",
"compressed_dicom",
".",
"read_file",
"(",
"file1",
")",
",",
"compressed_dicom",
".",
"read_file",
"(",
"file2",
")",
"rep",
"=",
"[",
"]",
"for",
"dataset",
"in",
"datasets",
":",
"lines",
"=",
"(",
"str",
"(",
"dataset",
".",
"file_meta",
")",
"+",
"\"\\n\"",
"+",
"str",
"(",
"dataset",
")",
")",
".",
"split",
"(",
"'\\n'",
")",
"lines",
"=",
"[",
"line",
"+",
"'\\n'",
"for",
"line",
"in",
"lines",
"]",
"# add the newline to the end",
"rep",
".",
"append",
"(",
"lines",
")",
"diff",
"=",
"difflib",
".",
"Differ",
"(",
")",
"for",
"line",
"in",
"diff",
".",
"compare",
"(",
"rep",
"[",
"0",
"]",
",",
"rep",
"[",
"1",
"]",
")",
":",
"if",
"(",
"line",
"[",
"0",
"]",
"==",
"'+'",
")",
"or",
"(",
"line",
"[",
"0",
"]",
"==",
"'-'",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"line",
")"
]
| Shows the fields that differ between two DICOM images.
Inspired by https://code.google.com/p/pydicom/source/browse/source/dicom/examples/DicomDiff.py | [
"Shows",
"the",
"fields",
"that",
"differ",
"between",
"two",
"DICOM",
"images",
"."
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/scripts/dicomdiff.py#L14-L32 |
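The difflib pattern above works on any pair of multi-line strings; a minimal sketch:

import difflib

a = 'PatientName: A\nRows: 512\n'.splitlines(True)  # keepends=True, like re-appending '\n' above
b = 'PatientName: B\nRows: 512\n'.splitlines(True)

for line in difflib.Differ().compare(a, b):
    if line[0] in '+-':  # keep only added/removed lines, as above
        print(line, end='')
# - PatientName: A
# + PatientName: B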
icometrix/dicom2nifti | dicom2nifti/image_volume.py | ImageVolume.get_slice | def get_slice(self, slice_type, slice_number, time_point=0):
"""
Returns a slice of the dataset.
slice.data contains the window/levelled values, in uint8
slice.original_data contains the original data for this slice
:param time_point: in case of 4d nifti the 4th dimension
:param slice_number: the slice number
:param slice_type: the slice type (AXIAL, SAGITTAL, CORONAL)
"""
slice_ = Slice()
slice_.slice_number = slice_number
# assert that slice_number is within the range
assert slice_number >= 0
assert slice_number < self._get_number_of_slices(slice_type)
slice_data = None
if slice_type == SliceType.AXIAL:
slice_data = self.__get_raw_slice__(slice_number, self.axial_orientation, time_point)
slice_.slice_orientation = self.axial_orientation
elif slice_type == SliceType.SAGITTAL:
slice_data = self.__get_raw_slice__(slice_number, self.sagittal_orientation, time_point)
slice_.slice_orientation = self.sagittal_orientation
elif slice_type == SliceType.CORONAL:
slice_data = self.__get_raw_slice__(slice_number, self.coronal_orientation, time_point)
slice_.slice_orientation = self.coronal_orientation
# make a copy of the slice_ so we do not modify the original
slice_.original_data = slice_data
return slice_ | python | def get_slice(self, slice_type, slice_number, time_point=0):
slice_ = Slice()
slice_.slice_number = slice_number
assert slice_number >= 0
assert slice_number < self._get_number_of_slices(slice_type)
slice_data = None
if slice_type == SliceType.AXIAL:
slice_data = self.__get_raw_slice__(slice_number, self.axial_orientation, time_point)
slice_.slice_orientation = self.axial_orientation
elif slice_type == SliceType.SAGITTAL:
slice_data = self.__get_raw_slice__(slice_number, self.sagittal_orientation, time_point)
slice_.slice_orientation = self.sagittal_orientation
elif slice_type == SliceType.CORONAL:
slice_data = self.__get_raw_slice__(slice_number, self.coronal_orientation, time_point)
slice_.slice_orientation = self.coronal_orientation
slice_.original_data = slice_data
return slice_ | [
"def",
"get_slice",
"(",
"self",
",",
"slice_type",
",",
"slice_number",
",",
"time_point",
"=",
"0",
")",
":",
"slice_",
"=",
"Slice",
"(",
")",
"slice_",
".",
"slice_number",
"=",
"slice_number",
"# assert that slice_ number is withing the range",
"assert",
"slice_number",
">=",
"0",
"assert",
"slice_number",
"<",
"self",
".",
"_get_number_of_slices",
"(",
"slice_type",
")",
"slice_data",
"=",
"None",
"if",
"slice_type",
"==",
"SliceType",
".",
"AXIAL",
":",
"slice_data",
"=",
"self",
".",
"__get_raw_slice__",
"(",
"slice_number",
",",
"self",
".",
"axial_orientation",
",",
"time_point",
")",
"slice_",
".",
"slice_orientation",
"=",
"self",
".",
"axial_orientation",
"elif",
"slice_type",
"==",
"SliceType",
".",
"SAGITTAL",
":",
"slice_data",
"=",
"self",
".",
"__get_raw_slice__",
"(",
"slice_number",
",",
"self",
".",
"sagittal_orientation",
",",
"time_point",
")",
"slice_",
".",
"slice_orientation",
"=",
"self",
".",
"sagittal_orientation",
"elif",
"slice_type",
"==",
"SliceType",
".",
"CORONAL",
":",
"slice_data",
"=",
"self",
".",
"__get_raw_slice__",
"(",
"slice_number",
",",
"self",
".",
"coronal_orientation",
",",
"time_point",
")",
"slice_",
".",
"slice_orientation",
"=",
"self",
".",
"coronal_orientation",
"# make a copy of the slice_ so we do not modify the orignal",
"slice_",
".",
"original_data",
"=",
"slice_data",
"return",
"slice_"
]
| Returns a slice of the dataset.
slice.data contains the window/levelled values, in uint8
slice.original_data contains the original data for this slice
:param time_point: in case of 4d nifti the 4th dimension
:param slice_number: the slice number
:param slice_type: the slice type (AXIAL, SAGITTAL, CORONAL) | [
"Returns",
"a",
"slice",
"of",
"the",
"dataset",
".",
"slice",
".",
"data",
"contains",
"the",
"window",
"/",
"levelled",
"values",
"in",
"uint8",
"slice",
".",
"original_data",
"contains",
"the",
"original",
"data",
"for",
"this",
"slice",
":",
"param",
"time_point",
":",
"in",
"case",
"of",
"4d",
"nifti",
"the",
"4th",
"dimension",
":",
"param",
"slice_number",
":",
"the",
"slice",
"number",
":",
"param",
"slice_type",
":",
"tye",
"slice",
"type",
"(",
"AXIAL",
"SAGITTAL",
"CORONAL",
")"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/image_volume.py#L138-L164 |
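A hedged usage sketch; the ImageVolume constructor signature is not shown in this record, so the whole snippet is an assumption kept as comments:

# volume = ImageVolume('scan.nii.gz')                  # constructor signature assumed
# n = volume._get_number_of_slices(SliceType.AXIAL)
# for i in range(n):
#     slice_ = volume.get_slice(SliceType.AXIAL, i)    # time_point defaults to 0
#     print(i, slice_.original_data.shape)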
icometrix/dicom2nifti | dicom2nifti/image_volume.py | ImageVolume._get_number_of_slices | def _get_number_of_slices(self, slice_type):
"""
Get the number of slices in a certain direction
"""
if slice_type == SliceType.AXIAL:
return self.dimensions[self.axial_orientation.normal_component]
elif slice_type == SliceType.SAGITTAL:
return self.dimensions[self.sagittal_orientation.normal_component]
elif slice_type == SliceType.CORONAL:
return self.dimensions[self.coronal_orientation.normal_component] | python | def _get_number_of_slices(self, slice_type):
if slice_type == SliceType.AXIAL:
return self.dimensions[self.axial_orientation.normal_component]
elif slice_type == SliceType.SAGITTAL:
return self.dimensions[self.sagittal_orientation.normal_component]
elif slice_type == SliceType.CORONAL:
return self.dimensions[self.coronal_orientation.normal_component] | [
"def",
"_get_number_of_slices",
"(",
"self",
",",
"slice_type",
")",
":",
"if",
"slice_type",
"==",
"SliceType",
".",
"AXIAL",
":",
"return",
"self",
".",
"dimensions",
"[",
"self",
".",
"axial_orientation",
".",
"normal_component",
"]",
"elif",
"slice_type",
"==",
"SliceType",
".",
"SAGITTAL",
":",
"return",
"self",
".",
"dimensions",
"[",
"self",
".",
"sagittal_orientation",
".",
"normal_component",
"]",
"elif",
"slice_type",
"==",
"SliceType",
".",
"CORONAL",
":",
"return",
"self",
".",
"dimensions",
"[",
"self",
".",
"coronal_orientation",
".",
"normal_component",
"]"
]
| Get the number of slices in a certain direction | [
"Get",
"the",
"number",
"of",
"slices",
"in",
"a",
"certain",
"direction"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/image_volume.py#L166-L175 |
icometrix/dicom2nifti | dicom2nifti/convert_dicom.py | dicom_series_to_nifti | def dicom_series_to_nifti(original_dicom_directory, output_file=None, reorient_nifti=True):
""" Converts dicom single series (see pydicom) to nifty, mimicking SPM
Examples: See unit test
will return a dictionary containing
- the NIFTI under key 'NIFTI'
- the NIFTI file path under 'NII_FILE'
- the BVAL file path under 'BVAL_FILE' (only for dti)
- the BVEC file path under 'BVEC_FILE' (only for dti)
IMPORTANT:
If no specific sequence type can be found it will default to anatomical and try to convert.
You should check that the data you are trying to convert is supported by this code
Inspired by http://nipy.sourceforge.net/nibabel/dicom/spm_dicom.html
Inspired by http://code.google.com/p/pydicom/source/browse/source/dicom/contrib/pydicom_series.py
:param reorient_nifti: if True the nifti affine and data will be updated so the data is stored LAS oriented
:param output_file: file path to write to if not set to None
:param original_dicom_directory: directory with the dicom files for a single series/scan
:return nibabel image
"""
# copy files so we can modify without altering the original
temp_directory = tempfile.mkdtemp()
try:
dicom_directory = os.path.join(temp_directory, 'dicom')
shutil.copytree(original_dicom_directory, dicom_directory)
dicom_input = common.read_dicom_directory(dicom_directory)
return dicom_array_to_nifti(dicom_input, output_file, reorient_nifti)
except AttributeError as exception:
reraise(
tp=ConversionError,
value=ConversionError(str(exception)),
tb=sys.exc_info()[2])
finally:
# remove the copied data
shutil.rmtree(temp_directory) | python | def dicom_series_to_nifti(original_dicom_directory, output_file=None, reorient_nifti=True):
temp_directory = tempfile.mkdtemp()
try:
dicom_directory = os.path.join(temp_directory, 'dicom')
shutil.copytree(original_dicom_directory, dicom_directory)
dicom_input = common.read_dicom_directory(dicom_directory)
return dicom_array_to_nifti(dicom_input, output_file, reorient_nifti)
except AttributeError as exception:
reraise(
tp=ConversionError,
value=ConversionError(str(exception)),
tb=sys.exc_info()[2])
finally:
shutil.rmtree(temp_directory) | [
"def",
"dicom_series_to_nifti",
"(",
"original_dicom_directory",
",",
"output_file",
"=",
"None",
",",
"reorient_nifti",
"=",
"True",
")",
":",
"# copy files so we can can modify without altering the original",
"temp_directory",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"dicom_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_directory",
",",
"'dicom'",
")",
"shutil",
".",
"copytree",
"(",
"original_dicom_directory",
",",
"dicom_directory",
")",
"dicom_input",
"=",
"common",
".",
"read_dicom_directory",
"(",
"dicom_directory",
")",
"return",
"dicom_array_to_nifti",
"(",
"dicom_input",
",",
"output_file",
",",
"reorient_nifti",
")",
"except",
"AttributeError",
"as",
"exception",
":",
"reraise",
"(",
"tp",
"=",
"ConversionError",
",",
"value",
"=",
"ConversionError",
"(",
"str",
"(",
"exception",
")",
")",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"finally",
":",
"# remove the copied data",
"shutil",
".",
"rmtree",
"(",
"temp_directory",
")"
]
| Converts dicom single series (see pydicom) to nifti, mimicking SPM
Examples: See unit test
will return a dictionary containing
- the NIFTI under key 'NIFTI'
- the NIFTI file path under 'NII_FILE'
- the BVAL file path under 'BVAL_FILE' (only for dti)
- the BVEC file path under 'BVEC_FILE' (only for dti)
IMPORTANT:
If no specific sequence type can be found it will default to anatomical and try to convert.
You should check that the data you are trying to convert is supported by this code
Inspired by http://nipy.sourceforge.net/nibabel/dicom/spm_dicom.html
Inspired by http://code.google.com/p/pydicom/source/browse/source/dicom/contrib/pydicom_series.py
:param reorient_nifti: if True the nifti affine and data will be updated so the data is stored LAS oriented
:param output_file: file path to write to if not set to None
:param original_dicom_directory: directory with the dicom files for a single series/scan
:return nibabel image | [
"Converts",
"dicom",
"single",
"series",
"(",
"see",
"pydicom",
")",
"to",
"nifty",
"mimicking",
"SPM"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_dicom.py#L54-L96 |
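A hedged usage sketch; paths are illustrative and the result keys are the ones listed in the docstring above:

from dicom2nifti.convert_dicom import dicom_series_to_nifti  # import path assumed

results = dicom_series_to_nifti('/data/scan_series', output_file='/data/scan.nii.gz')
print(results['NII_FILE'])        # the written file path
nifti_image = results['NIFTI']    # the nibabel image object
bval = results.get('BVAL_FILE')   # present only for DTI, per the docstring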
icometrix/dicom2nifti | dicom2nifti/convert_dicom.py | dicom_array_to_nifti | def dicom_array_to_nifti(dicom_list, output_file, reorient_nifti=True):
""" Converts dicom single series (see pydicom) to nifty, mimicking SPM
Examples: See unit test
will return a dictionary containing
- the NIFTI under key 'NIFTI'
- the NIFTI file path under 'NII_FILE'
- the BVAL file path under 'BVAL_FILE' (only for dti)
- the BVEC file path under 'BVEC_FILE' (only for dti)
IMPORTANT:
If no specific sequence type can be found it will default to anatomical and try to convert.
You should check that the data you are trying to convert is supported by this code
Inspired by http://nipy.sourceforge.net/nibabel/dicom/spm_dicom.html
Inspired by http://code.google.com/p/pydicom/source/browse/source/dicom/contrib/pydicom_series.py
:param reorient_nifti: if True the nifti affine and data will be updated so the data is stored LAS oriented
:param output_file: file path to write to
:param dicom_list: list with uncompressed dicom objects as read by pydicom
"""
# validate the input: only imaging dicoms can be converted
if not are_imaging_dicoms(dicom_list):
raise ConversionValidationError('NON_IMAGING_DICOM_FILES')
vendor = _get_vendor(dicom_list)
if vendor == Vendor.GENERIC:
results = convert_generic.dicom_to_nifti(dicom_list, output_file)
elif vendor == Vendor.SIEMENS:
results = convert_siemens.dicom_to_nifti(dicom_list, output_file)
elif vendor == Vendor.GE:
results = convert_ge.dicom_to_nifti(dicom_list, output_file)
elif vendor == Vendor.PHILIPS:
results = convert_philips.dicom_to_nifti(dicom_list, output_file)
elif vendor == Vendor.HITACHI:
results = convert_hitachi.dicom_to_nifti(dicom_list, output_file)
else:
raise ConversionValidationError("UNSUPPORTED_DATA")
# do image reorientation if needed
if reorient_nifti or settings.resample:
image_reorientation.reorient_image(results['NII_FILE'], results['NII_FILE'])
# resampling needs to be after reorientation
if settings.resample:
if not common.is_orthogonal_nifti(results['NII_FILE']):
resample.resample_single_nifti(results['NII_FILE'])
return results | python | def dicom_array_to_nifti(dicom_list, output_file, reorient_nifti=True):
if not are_imaging_dicoms(dicom_list):
raise ConversionValidationError('NON_IMAGING_DICOM_FILES')
vendor = _get_vendor(dicom_list)
if vendor == Vendor.GENERIC:
results = convert_generic.dicom_to_nifti(dicom_list, output_file)
elif vendor == Vendor.SIEMENS:
results = convert_siemens.dicom_to_nifti(dicom_list, output_file)
elif vendor == Vendor.GE:
results = convert_ge.dicom_to_nifti(dicom_list, output_file)
elif vendor == Vendor.PHILIPS:
results = convert_philips.dicom_to_nifti(dicom_list, output_file)
elif vendor == Vendor.HITACHI:
results = convert_hitachi.dicom_to_nifti(dicom_list, output_file)
else:
raise ConversionValidationError("UNSUPPORTED_DATA")
if reorient_nifti or settings.resample:
image_reorientation.reorient_image(results['NII_FILE'], results['NII_FILE'])
if settings.resample:
if not common.is_orthogonal_nifti(results['NII_FILE']):
resample.resample_single_nifti(results['NII_FILE'])
return results | [
"def",
"dicom_array_to_nifti",
"(",
"dicom_list",
",",
"output_file",
",",
"reorient_nifti",
"=",
"True",
")",
":",
"# copy files so we can can modify without altering the original",
"if",
"not",
"are_imaging_dicoms",
"(",
"dicom_list",
")",
":",
"raise",
"ConversionValidationError",
"(",
"'NON_IMAGING_DICOM_FILES'",
")",
"vendor",
"=",
"_get_vendor",
"(",
"dicom_list",
")",
"if",
"vendor",
"==",
"Vendor",
".",
"GENERIC",
":",
"results",
"=",
"convert_generic",
".",
"dicom_to_nifti",
"(",
"dicom_list",
",",
"output_file",
")",
"elif",
"vendor",
"==",
"Vendor",
".",
"SIEMENS",
":",
"results",
"=",
"convert_siemens",
".",
"dicom_to_nifti",
"(",
"dicom_list",
",",
"output_file",
")",
"elif",
"vendor",
"==",
"Vendor",
".",
"GE",
":",
"results",
"=",
"convert_ge",
".",
"dicom_to_nifti",
"(",
"dicom_list",
",",
"output_file",
")",
"elif",
"vendor",
"==",
"Vendor",
".",
"PHILIPS",
":",
"results",
"=",
"convert_philips",
".",
"dicom_to_nifti",
"(",
"dicom_list",
",",
"output_file",
")",
"elif",
"vendor",
"==",
"Vendor",
".",
"HITACHI",
":",
"results",
"=",
"convert_hitachi",
".",
"dicom_to_nifti",
"(",
"dicom_list",
",",
"output_file",
")",
"else",
":",
"raise",
"ConversionValidationError",
"(",
"\"UNSUPPORTED_DATA\"",
")",
"# do image reorientation if needed",
"if",
"reorient_nifti",
"or",
"settings",
".",
"resample",
":",
"image_reorientation",
".",
"reorient_image",
"(",
"results",
"[",
"'NII_FILE'",
"]",
",",
"results",
"[",
"'NII_FILE'",
"]",
")",
"# resampling needs to be after reorientation",
"if",
"settings",
".",
"resample",
":",
"if",
"not",
"common",
".",
"is_orthogonal_nifti",
"(",
"results",
"[",
"'NII_FILE'",
"]",
")",
":",
"resample",
".",
"resample_single_nifti",
"(",
"results",
"[",
"'NII_FILE'",
"]",
")",
"return",
"results"
]
| Converts dicom single series (see pydicom) to nifti, mimicking SPM
Examples: See unit test
will return a dictionary containing
- the NIFTI under key 'NIFTI'
- the NIFTI file path under 'NII_FILE'
- the BVAL file path under 'BVAL_FILE' (only for dti)
- the BVEC file path under 'BVEC_FILE' (only for dti)
IMPORTANT:
If no specific sequence type can be found it will default to anatomical and try to convert.
You should check that the data you are trying to convert is supported by this code
Inspired by http://nipy.sourceforge.net/nibabel/dicom/spm_dicom.html
Inspired by http://code.google.com/p/pydicom/source/browse/source/dicom/contrib/pydicom_series.py
:param reorient_nifti: if True the nifti affine and data will be updated so the data is stored LAS oriented
:param output_file: file path to write to
:param dicom_list: list with uncompressed dicom objects as read by pydicom | [
"Converts",
"dicom",
"single",
"series",
"(",
"see",
"pydicom",
")",
"to",
"nifty",
"mimicking",
"SPM"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_dicom.py#L99-L149 |
icometrix/dicom2nifti | dicom2nifti/convert_dicom.py | are_imaging_dicoms | def are_imaging_dicoms(dicom_input):
"""
This function will check the dicom headers to see which type of series it is
Possibilities are fMRI, DTI, Anatomical (if no clear type is found anatomical is used)
:param dicom_input: directory with dicom files or a list of dicom objects
"""
# if it is philips and multiframe dicom then we assume it is ok
if common.is_philips(dicom_input):
if common.is_multiframe_dicom(dicom_input):
return True
# for all others if there is image orientation patient we assume it is ok
header = dicom_input[0]
return Tag(0x0020, 0x0037) in header | python | def are_imaging_dicoms(dicom_input):
if common.is_philips(dicom_input):
if common.is_multiframe_dicom(dicom_input):
return True
header = dicom_input[0]
return Tag(0x0020, 0x0037) in header | [
"def",
"are_imaging_dicoms",
"(",
"dicom_input",
")",
":",
"# if it is philips and multiframe dicom then we assume it is ok",
"if",
"common",
".",
"is_philips",
"(",
"dicom_input",
")",
":",
"if",
"common",
".",
"is_multiframe_dicom",
"(",
"dicom_input",
")",
":",
"return",
"True",
"# for all others if there is image position patient we assume it is ok",
"header",
"=",
"dicom_input",
"[",
"0",
"]",
"return",
"Tag",
"(",
"0x0020",
",",
"0x0037",
")",
"in",
"header"
]
| This function will check the dicom headers to see which type of series it is
Possibilities are fMRI, DTI, Anatomical (if no clear type is found anatomical is used)
:param dicom_input: directory with dicom files or a list of dicom objects | [
"This",
"function",
"will",
"check",
"the",
"dicom",
"headers",
"to",
"see",
"which",
"type",
"of",
"series",
"it",
"is",
"Possibilities",
"are",
"fMRI",
"DTI",
"Anatomical",
"(",
"if",
"no",
"clear",
"type",
"is",
"found",
"anatomical",
"is",
"used",
")"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_dicom.py#L152-L167 |
icometrix/dicom2nifti | dicom2nifti/convert_dicom.py | _get_vendor | def _get_vendor(dicom_input):
"""
This function will check the dicom headers to see which type of series it is
Possibilities are fMRI, DTI, Anatomical (if no clear type is found anatomical is used)
"""
# check if it is siemens
if common.is_siemens(dicom_input):
logger.info('Found manufacturer: SIEMENS')
return Vendor.SIEMENS
# check if it is ge
if common.is_ge(dicom_input):
logger.info('Found manufacturer: GE')
return Vendor.GE
# check if it is philips
if common.is_philips(dicom_input):
logger.info('Found manufacturer: PHILIPS')
return Vendor.PHILIPS
# check if it is hitachi
if common.is_hitachi(dicom_input):
logger.info('Found manufacturer: HITACHI')
return Vendor.HITACHI
# generic by default
logger.info('WARNING: Assuming generic vendor conversion (ANATOMICAL)')
return Vendor.GENERIC | python | def _get_vendor(dicom_input):
if common.is_siemens(dicom_input):
logger.info('Found manufacturer: SIEMENS')
return Vendor.SIEMENS
if common.is_ge(dicom_input):
logger.info('Found manufacturer: GE')
return Vendor.GE
if common.is_philips(dicom_input):
logger.info('Found manufacturer: PHILIPS')
return Vendor.PHILIPS
if common.is_hitachi(dicom_input):
logger.info('Found manufacturer: HITACHI')
return Vendor.HITACHI
logger.info('WARNING: Assuming generic vendor conversion (ANATOMICAL)')
return Vendor.GENERIC | [
"def",
"_get_vendor",
"(",
"dicom_input",
")",
":",
"# check if it is siemens",
"if",
"common",
".",
"is_siemens",
"(",
"dicom_input",
")",
":",
"logger",
".",
"info",
"(",
"'Found manufacturer: SIEMENS'",
")",
"return",
"Vendor",
".",
"SIEMENS",
"# check if it is ge",
"if",
"common",
".",
"is_ge",
"(",
"dicom_input",
")",
":",
"logger",
".",
"info",
"(",
"'Found manufacturer: GE'",
")",
"return",
"Vendor",
".",
"GE",
"# check if it is philips",
"if",
"common",
".",
"is_philips",
"(",
"dicom_input",
")",
":",
"logger",
".",
"info",
"(",
"'Found manufacturer: PHILIPS'",
")",
"return",
"Vendor",
".",
"PHILIPS",
"# check if it is philips",
"if",
"common",
".",
"is_hitachi",
"(",
"dicom_input",
")",
":",
"logger",
".",
"info",
"(",
"'Found manufacturer: HITACHI'",
")",
"return",
"Vendor",
".",
"HITACHI",
"# generic by default",
"logger",
".",
"info",
"(",
"'WARNING: Assuming generic vendor conversion (ANATOMICAL)'",
")",
"return",
"Vendor",
".",
"GENERIC"
]
| This function will check the dicom headers to see which type of series it is
Possibilities are fMRI, DTI, Anatomical (if no clear type is found anatomical is used) | [
"This",
"function",
"will",
"check",
"the",
"dicom",
"headers",
"to",
"see",
"which",
"type",
"of",
"series",
"it",
"is",
"Possibilities",
"are",
"fMRI",
"DTI",
"Anatomical",
"(",
"if",
"no",
"clear",
"type",
"is",
"found",
"anatomical",
"is",
"used",
")"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_dicom.py#L170-L193 |
icometrix/dicom2nifti | dicom2nifti/convert_dicom.py | _get_first_header | def _get_first_header(dicom_directory):
"""
Function to get the first dicom file from a directory and return the header
Useful to determine the type of data to convert
:param dicom_directory: directory with dicom files
"""
# looping over all files
for root, _, file_names in os.walk(dicom_directory):
# go over all the files and try to read the dicom header
for file_name in file_names:
file_path = os.path.join(root, file_name)
# check whether it is a dicom file
if not compressed_dicom.is_dicom_file(file_path):
continue
# read the headers
return compressed_dicom.read_file(file_path,
stop_before_pixels=True,
force=dicom2nifti.settings.pydicom_read_force)
# no dicom files found
raise ConversionError('NO_DICOM_FILES_FOUND') | python | def _get_first_header(dicom_directory):
for root, _, file_names in os.walk(dicom_directory):
for file_name in file_names:
file_path = os.path.join(root, file_name)
if not compressed_dicom.is_dicom_file(file_path):
continue
return compressed_dicom.read_file(file_path,
stop_before_pixels=True,
force=dicom2nifti.settings.pydicom_read_force)
raise ConversionError('NO_DICOM_FILES_FOUND') | [
"def",
"_get_first_header",
"(",
"dicom_directory",
")",
":",
"# looping over all files",
"for",
"root",
",",
"_",
",",
"file_names",
"in",
"os",
".",
"walk",
"(",
"dicom_directory",
")",
":",
"# go over all the files and try to read the dicom header",
"for",
"file_name",
"in",
"file_names",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"file_name",
")",
"# check wither it is a dicom file",
"if",
"not",
"compressed_dicom",
".",
"is_dicom_file",
"(",
"file_path",
")",
":",
"continue",
"# read the headers",
"return",
"compressed_dicom",
".",
"read_file",
"(",
"file_path",
",",
"stop_before_pixels",
"=",
"True",
",",
"force",
"=",
"dicom2nifti",
".",
"settings",
".",
"pydicom_read_force",
")",
"# no dicom files found",
"raise",
"ConversionError",
"(",
"'NO_DICOM_FILES_FOUND'",
")"
]
| Function to get the first dicom file from a directory and return the header
Useful to determine the type of data to convert
:param dicom_directory: directory with dicom files | [
"Function",
"to",
"get",
"the",
"first",
"dicom",
"file",
"form",
"a",
"directory",
"and",
"return",
"the",
"header",
"Useful",
"to",
"determine",
"the",
"type",
"of",
"data",
"to",
"convert"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_dicom.py#L196-L216 |
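
A self-contained variant of the same first-header scan, assuming plain pydicom in place of the project's compressed_dicom wrapper (pydicom.dcmread with stop_before_pixels=True is standard pydicom API; the try/except stands in for the is_dicom_file check):

import os
import pydicom
from pydicom.errors import InvalidDicomError

def first_dicom_header(dicom_directory):
    # walk the tree and return the first header pydicom can parse
    for root, _, file_names in os.walk(dicom_directory):
        for file_name in sorted(file_names):
            file_path = os.path.join(root, file_name)
            try:
                # skip the pixel data, only the header fields are needed
                return pydicom.dcmread(file_path, stop_before_pixels=True)
            except InvalidDicomError:
                continue
    raise IOError('NO_DICOM_FILES_FOUND')
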
icometrix/dicom2nifti | scripts/shrink_singleframe.py | _shrink_file | def _shrink_file(dicom_file_in, subsample_factor):
"""
Shrink a single dicomfile by subsampling its pixel data
:param dicom_file_in: filepath for the input file (overwritten in place)
:param subsample_factor: factor by which rows and columns are subsampled
"""
# Default meta_fields
# Required fields according to reference
dicom_file_out = dicom_file_in
# Load dicom_file_in
dicom_in = compressed_dicom.read_file(dicom_file_in)
# Create new dicom file
# Set new file meta information
file_meta = pydicom.dataset.Dataset()
for key, value in dicom_in.file_meta.items():
file_meta.add(value)
# Create the FileDataset instance (initially no data elements, but file_meta supplied)
dicom_out = pydicom.dataset.FileDataset(dicom_file_out, {}, file_meta=file_meta, preamble=b'\0' * 128)
# Copy transfer syntax
dicom_out.is_little_endian = dicom_in.is_little_endian
dicom_out.is_implicit_VR = dicom_in.is_implicit_VR
rows = 0
columns = 0
# Add the data elements
for field_key, field_value in dicom_in.items():
logging.info(field_key)
if field_key == (0x7fe0, 0x0010):
pixel_array = dicom_in.pixel_array[::subsample_factor, ::subsample_factor]
dicom_out.PixelData = pixel_array.tostring() # = byte array (see pydicom docs)
rows = pixel_array.shape[1]
columns = pixel_array.shape[0]
# noinspection PyPep8Naming
dicom_out[0x7fe0, 0x0010].VR = 'OB'
else:
dicom_out.add(field_value)
dicom_out.PixelSpacing[0] *= subsample_factor
dicom_out.PixelSpacing[1] *= subsample_factor
dicom_out.Rows = rows
dicom_out.Columns = columns
# Save dicom_file_out
# Make sure we have a directory
if not os.path.exists(os.path.dirname(dicom_file_out)):
logging.info('Decompressing files')
# Save the file
dicom_out.save_as(dicom_file_out, write_like_original=False) | python | def _shrink_file(dicom_file_in, subsample_factor):
dicom_file_out = dicom_file_in
dicom_in = compressed_dicom.read_file(dicom_file_in)
file_meta = pydicom.dataset.Dataset()
for key, value in dicom_in.file_meta.items():
file_meta.add(value)
dicom_out = pydicom.dataset.FileDataset(dicom_file_out, {}, file_meta=file_meta, preamble=b'\0' * 128)
dicom_out.is_little_endian = dicom_in.is_little_endian
dicom_out.is_implicit_VR = dicom_in.is_implicit_VR
rows = 0
columns = 0
for field_key, field_value in dicom_in.items():
logging.info(field_key)
if field_key == (0x7fe0, 0x0010):
pixel_array = dicom_in.pixel_array[::subsample_factor, ::subsample_factor]
dicom_out.PixelData = pixel_array.tostring()
rows = pixel_array.shape[1]
columns = pixel_array.shape[0]
dicom_out[0x7fe0, 0x0010].VR = 'OB'
else:
dicom_out.add(field_value)
dicom_out.PixelSpacing[0] *= subsample_factor
dicom_out.PixelSpacing[1] *= subsample_factor
dicom_out.Rows = rows
dicom_out.Columns = columns
if not os.path.exists(os.path.dirname(dicom_file_out)):
logging.info('Decompressing files')
dicom_out.save_as(dicom_file_out, write_like_original=False) | [
"def",
"_shrink_file",
"(",
"dicom_file_in",
",",
"subsample_factor",
")",
":",
"# Default meta_fields",
"# Required fields according to reference",
"dicom_file_out",
"=",
"dicom_file_in",
"# Load dicom_file_in",
"dicom_in",
"=",
"compressed_dicom",
".",
"read_file",
"(",
"dicom_file_in",
")",
"# Create new dicom file",
"# Set new file meta information",
"file_meta",
"=",
"pydicom",
".",
"dataset",
".",
"Dataset",
"(",
")",
"for",
"key",
",",
"value",
"in",
"dicom_in",
".",
"file_meta",
".",
"items",
"(",
")",
":",
"file_meta",
".",
"add",
"(",
"value",
")",
"# Create the FileDataset instance (initially no data elements, but file_meta supplied)",
"dicom_out",
"=",
"pydicom",
".",
"dataset",
".",
"FileDataset",
"(",
"dicom_file_out",
",",
"{",
"}",
",",
"file_meta",
"=",
"file_meta",
",",
"preamble",
"=",
"b'\\0'",
"*",
"128",
")",
"# Copy transfer syntax",
"dicom_out",
".",
"is_little_endian",
"=",
"dicom_in",
".",
"is_little_endian",
"dicom_out",
".",
"is_implicit_VR",
"=",
"dicom_in",
".",
"is_implicit_VR",
"rows",
"=",
"0",
"columns",
"=",
"0",
"# Add the data elements",
"for",
"field_key",
",",
"field_value",
"in",
"dicom_in",
".",
"items",
"(",
")",
":",
"logging",
".",
"info",
"(",
"field_key",
")",
"if",
"field_key",
"==",
"(",
"0x7fe0",
",",
"0x0010",
")",
":",
"pixel_array",
"=",
"dicom_in",
".",
"pixel_array",
"[",
":",
":",
"subsample_factor",
",",
":",
":",
"subsample_factor",
"]",
"dicom_out",
".",
"PixelData",
"=",
"pixel_array",
".",
"tostring",
"(",
")",
"# = byte array (see pydicom docs)",
"rows",
"=",
"pixel_array",
".",
"shape",
"[",
"1",
"]",
"columns",
"=",
"pixel_array",
".",
"shape",
"[",
"0",
"]",
"# noinspection PyPep8Naming",
"dicom_out",
"[",
"0x7fe0",
",",
"0x0010",
"]",
".",
"VR",
"=",
"'OB'",
"else",
":",
"dicom_out",
".",
"add",
"(",
"field_value",
")",
"dicom_out",
".",
"PixelSpacing",
"[",
"0",
"]",
"*=",
"subsample_factor",
"dicom_out",
".",
"PixelSpacing",
"[",
"1",
"]",
"*=",
"subsample_factor",
"dicom_out",
".",
"Rows",
"=",
"rows",
"dicom_out",
".",
"Columns",
"=",
"columns",
"# Save dicom_file_out",
"# Make sure we have a directory",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"dicom_file_out",
")",
")",
":",
"logging",
".",
"info",
"(",
"'Decompressing files'",
")",
"# Save the file",
"dicom_out",
".",
"save_as",
"(",
"dicom_file_out",
",",
"write_like_original",
"=",
"False",
")"
]
| Shrink a single dicomfile by subsampling its pixel data
:param dicom_file_in: filepath for the input file (overwritten in place)
:param subsample_factor: factor by which rows and columns are subsampled | [
"Anonimize",
"a",
"single",
"dicomfile",
":",
"param",
"dicom_file_in",
":",
"filepath",
"for",
"input",
"file",
":",
"param",
"dicom_file_out",
":",
"filepath",
"for",
"output",
"file",
":",
"param",
"fields_to_keep",
":",
"dicom",
"tags",
"to",
"keep"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/scripts/shrink_singleframe.py#L16-L72 |
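
The core of the shrink step above is plain numpy strided slicing: keeping every factor-th row and column means the physical spacing between the kept samples grows by the same factor. A toy example with invented values:

import numpy

def shrink_pixels(pixel_array, pixel_spacing, factor):
    # keep every factor-th row and column
    small = pixel_array[::factor, ::factor]
    # the distance between neighbouring kept samples grows by the factor
    new_spacing = [pixel_spacing[0] * factor, pixel_spacing[1] * factor]
    return small, new_spacing

pixels = numpy.arange(16, dtype=numpy.uint16).reshape(4, 4)
small, spacing = shrink_pixels(pixels, [0.5, 0.5], 2)
print(small.shape, spacing)  # prints: (2, 2) [1.0, 1.0]
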
icometrix/dicom2nifti | dicom2nifti/image_reorientation.py | reorient_image | def reorient_image(input_image, output_image):
"""
Change the orientation of the Image data in order to be in LAS space
x will represent the coronal plane, y the sagittal and z the axial plane.
x increases from Right (R) to Left (L), y from Posterior (P) to Anterior (A) and z from Inferior (I) to Superior (S)
:param input_image: filepath to the input nibabel image
:param output_image: filepath where the reoriented nibabel image is written
"""
# Use the imageVolume module to find which coordinate corresponds to each plane
# and get the image data in RAS orientation
# print 'Reading nifti'
image = load(input_image)
# 4d has a different conversion than 3d
# print 'Reorganizing data'
if image.nifti_data.squeeze().ndim == 4:
new_image = _reorient_4d(image)
elif image.nifti_data.squeeze().ndim == 3:
new_image = _reorient_3d(image)
else:
raise Exception('Only 3d and 4d images are supported')
# print 'Recreating affine'
affine = image.nifti.affine
# Based on VolumeImage.py where slice orientation 1 represents the axial plane
# Flipping on the data may be needed based on x_inverted, y_inverted, ZInverted
# Create new affine header by changing the order of the columns of the input image header
# the last column with the origin depends on the origin of the original image, the size and the direction of x,y,z
new_affine = numpy.eye(4)
new_affine[:, 0] = affine[:, image.sagittal_orientation.normal_component]
new_affine[:, 1] = affine[:, image.coronal_orientation.normal_component]
new_affine[:, 2] = affine[:, image.axial_orientation.normal_component]
point = [0, 0, 0, 1]
# If the orientation of coordinates is inverted, then the origin of the "new" image
# would correspond to the last voxel of the original image
# First we need to find which point is the origin point in image coordinates
# and then transform it in world coordinates
if not image.axial_orientation.x_inverted:
new_affine[:, 0] = - new_affine[:, 0]
point[image.sagittal_orientation.normal_component] = image.dimensions[
image.sagittal_orientation.normal_component] - 1
# new_affine[0, 3] = - new_affine[0, 3]
if image.axial_orientation.y_inverted:
new_affine[:, 1] = - new_affine[:, 1]
point[image.coronal_orientation.normal_component] = image.dimensions[
image.coronal_orientation.normal_component] - 1
# new_affine[1, 3] = - new_affine[1, 3]
if image.coronal_orientation.y_inverted:
new_affine[:, 2] = - new_affine[:, 2]
point[image.axial_orientation.normal_component] = image.dimensions[image.axial_orientation.normal_component] - 1
# new_affine[2, 3] = - new_affine[2, 3]
new_affine[:, 3] = numpy.dot(affine, point)
# DONE: Needs to update new_affine, so that there is no translation difference between the original
# and created image (now there is 1-2 voxels translation)
# print 'Creating new nifti image'
nibabel.nifti1.Nifti1Image(new_image, new_affine).to_filename(output_image) | python | def reorient_image(input_image, output_image):
image = load(input_image)
if image.nifti_data.squeeze().ndim == 4:
new_image = _reorient_4d(image)
elif image.nifti_data.squeeze().ndim == 3:
new_image = _reorient_3d(image)
else:
raise Exception('Only 3d and 4d images are supported')
affine = image.nifti.affine
new_affine = numpy.eye(4)
new_affine[:, 0] = affine[:, image.sagittal_orientation.normal_component]
new_affine[:, 1] = affine[:, image.coronal_orientation.normal_component]
new_affine[:, 2] = affine[:, image.axial_orientation.normal_component]
point = [0, 0, 0, 1]
if not image.axial_orientation.x_inverted:
new_affine[:, 0] = - new_affine[:, 0]
point[image.sagittal_orientation.normal_component] = image.dimensions[
image.sagittal_orientation.normal_component] - 1
if image.axial_orientation.y_inverted:
new_affine[:, 1] = - new_affine[:, 1]
point[image.coronal_orientation.normal_component] = image.dimensions[
image.coronal_orientation.normal_component] - 1
if image.coronal_orientation.y_inverted:
new_affine[:, 2] = - new_affine[:, 2]
point[image.axial_orientation.normal_component] = image.dimensions[image.axial_orientation.normal_component] - 1
new_affine[:, 3] = numpy.dot(affine, point)
nibabel.nifti1.Nifti1Image(new_image, new_affine).to_filename(output_image) | [
"def",
"reorient_image",
"(",
"input_image",
",",
"output_image",
")",
":",
"# Use the imageVolume module to find which coordinate corresponds to each plane",
"# and get the image data in RAS orientation",
"# print 'Reading nifti'",
"image",
"=",
"load",
"(",
"input_image",
")",
"# 4d have a different conversion to 3d",
"# print 'Reorganizing data'",
"if",
"image",
".",
"nifti_data",
".",
"squeeze",
"(",
")",
".",
"ndim",
"==",
"4",
":",
"new_image",
"=",
"_reorient_4d",
"(",
"image",
")",
"elif",
"image",
".",
"nifti_data",
".",
"squeeze",
"(",
")",
".",
"ndim",
"==",
"3",
":",
"new_image",
"=",
"_reorient_3d",
"(",
"image",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Only 3d and 4d images are supported'",
")",
"# print 'Recreating affine'",
"affine",
"=",
"image",
".",
"nifti",
".",
"affine",
"# Based on VolumeImage.py where slice orientation 1 represents the axial plane",
"# Flipping on the data may be needed based on x_inverted, y_inverted, ZInverted",
"# Create new affine header by changing the order of the columns of the input image header",
"# the last column with the origin depends on the origin of the original image, the size and the direction of x,y,z",
"new_affine",
"=",
"numpy",
".",
"eye",
"(",
"4",
")",
"new_affine",
"[",
":",
",",
"0",
"]",
"=",
"affine",
"[",
":",
",",
"image",
".",
"sagittal_orientation",
".",
"normal_component",
"]",
"new_affine",
"[",
":",
",",
"1",
"]",
"=",
"affine",
"[",
":",
",",
"image",
".",
"coronal_orientation",
".",
"normal_component",
"]",
"new_affine",
"[",
":",
",",
"2",
"]",
"=",
"affine",
"[",
":",
",",
"image",
".",
"axial_orientation",
".",
"normal_component",
"]",
"point",
"=",
"[",
"0",
",",
"0",
",",
"0",
",",
"1",
"]",
"# If the orientation of coordinates is inverted, then the origin of the \"new\" image",
"# would correspond to the last voxel of the original image",
"# First we need to find which point is the origin point in image coordinates",
"# and then transform it in world coordinates",
"if",
"not",
"image",
".",
"axial_orientation",
".",
"x_inverted",
":",
"new_affine",
"[",
":",
",",
"0",
"]",
"=",
"-",
"new_affine",
"[",
":",
",",
"0",
"]",
"point",
"[",
"image",
".",
"sagittal_orientation",
".",
"normal_component",
"]",
"=",
"image",
".",
"dimensions",
"[",
"image",
".",
"sagittal_orientation",
".",
"normal_component",
"]",
"-",
"1",
"# new_affine[0, 3] = - new_affine[0, 3]",
"if",
"image",
".",
"axial_orientation",
".",
"y_inverted",
":",
"new_affine",
"[",
":",
",",
"1",
"]",
"=",
"-",
"new_affine",
"[",
":",
",",
"1",
"]",
"point",
"[",
"image",
".",
"coronal_orientation",
".",
"normal_component",
"]",
"=",
"image",
".",
"dimensions",
"[",
"image",
".",
"coronal_orientation",
".",
"normal_component",
"]",
"-",
"1",
"# new_affine[1, 3] = - new_affine[1, 3]",
"if",
"image",
".",
"coronal_orientation",
".",
"y_inverted",
":",
"new_affine",
"[",
":",
",",
"2",
"]",
"=",
"-",
"new_affine",
"[",
":",
",",
"2",
"]",
"point",
"[",
"image",
".",
"axial_orientation",
".",
"normal_component",
"]",
"=",
"image",
".",
"dimensions",
"[",
"image",
".",
"axial_orientation",
".",
"normal_component",
"]",
"-",
"1",
"# new_affine[2, 3] = - new_affine[2, 3]",
"new_affine",
"[",
":",
",",
"3",
"]",
"=",
"numpy",
".",
"dot",
"(",
"affine",
",",
"point",
")",
"# DONE: Needs to update new_affine, so that there is no translation difference between the original",
"# and created image (now there is 1-2 voxels translation)",
"# print 'Creating new nifti image'",
"nibabel",
".",
"nifti1",
".",
"Nifti1Image",
"(",
"new_image",
",",
"new_affine",
")",
".",
"to_filename",
"(",
"output_image",
")"
]
| Change the orientation of the Image data in order to be in LAS space
x will represent the coronal plane, y the sagittal and z the axial plane.
x increases from Right (R) to Left (L), y from Posterior (P) to Anterior (A) and z from Inferior (I) to Superior (S)
:param input_image: filepath to the input nibabel image
:param output_image: filepath where the reoriented nibabel image is written | [
"Change",
"the",
"orientation",
"of",
"the",
"Image",
"data",
"in",
"order",
"to",
"be",
"in",
"LAS",
"space",
"x",
"will",
"represent",
"the",
"coronal",
"plane",
"y",
"the",
"sagittal",
"and",
"z",
"the",
"axial",
"plane",
".",
"x",
"increases",
"from",
"Right",
"(",
"R",
")",
"to",
"Left",
"(",
"L",
")",
"y",
"from",
"Posterior",
"(",
"P",
")",
"to",
"Anterior",
"(",
"A",
")",
"and",
"z",
"from",
"Inferior",
"(",
"I",
")",
"to",
"Superior",
"(",
"S",
")"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/image_reorientation.py#L18-L80 |
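
The affine bookkeeping above boils down to permuting the first three columns of the 4x4 matrix into sagittal/coronal/axial order and, for every axis whose direction is flipped, negating that column and moving the origin to the opposite corner of the volume. A toy numpy sketch (the axis order, flips and shape below are invented, not taken from the row):

import numpy

def permute_affine(affine, order, flips, shape):
    new_affine = numpy.eye(4)
    # reorder the direction/scaling columns to the new axis order
    new_affine[:, :3] = affine[:, order]
    origin_index = numpy.array([0, 0, 0, 1])
    for new_axis, src_axis in enumerate(order):
        if flips[new_axis]:
            # a flipped axis points the other way ...
            new_affine[:, new_axis] *= -1
            # ... so the new origin sits at the last voxel along it
            origin_index[src_axis] = shape[src_axis] - 1
    # transform that corner voxel to world coordinates
    new_affine[:, 3] = affine.dot(origin_index)
    return new_affine

affine = numpy.diag([2.0, 3.0, 4.0, 1.0])
print(permute_affine(affine, [2, 0, 1], [True, False, False], (10, 12, 14)))
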
icometrix/dicom2nifti | dicom2nifti/image_reorientation.py | _reorient_3d | def _reorient_3d(image):
"""
Reorganize the data for a 3d nifti
"""
# Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size
# of the array in each direction is the same with the corresponding direction of the input image.
new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
image.dimensions[image.coronal_orientation.normal_component],
image.dimensions[image.axial_orientation.normal_component]],
dtype=image.nifti_data.dtype)
# Fill the new image with the values of the input image but with matching the orientation with x,y,z
if image.coronal_orientation.y_inverted:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
new_image.shape[2] - 1 - i).original_data))
else:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
i).original_data))
return new_image | python | def _reorient_3d(image):
new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
image.dimensions[image.coronal_orientation.normal_component],
image.dimensions[image.axial_orientation.normal_component]],
dtype=image.nifti_data.dtype)
if image.coronal_orientation.y_inverted:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
new_image.shape[2] - 1 - i).original_data))
else:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
i).original_data))
return new_image | [
"def",
"_reorient_3d",
"(",
"image",
")",
":",
"# Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size",
"# of the array in each direction is the same with the corresponding direction of the input image.",
"new_image",
"=",
"numpy",
".",
"zeros",
"(",
"[",
"image",
".",
"dimensions",
"[",
"image",
".",
"sagittal_orientation",
".",
"normal_component",
"]",
",",
"image",
".",
"dimensions",
"[",
"image",
".",
"coronal_orientation",
".",
"normal_component",
"]",
",",
"image",
".",
"dimensions",
"[",
"image",
".",
"axial_orientation",
".",
"normal_component",
"]",
"]",
",",
"dtype",
"=",
"image",
".",
"nifti_data",
".",
"dtype",
")",
"# Fill the new image with the values of the input image but with matching the orientation with x,y,z",
"if",
"image",
".",
"coronal_orientation",
".",
"y_inverted",
":",
"for",
"i",
"in",
"range",
"(",
"new_image",
".",
"shape",
"[",
"2",
"]",
")",
":",
"new_image",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"numpy",
".",
"fliplr",
"(",
"numpy",
".",
"squeeze",
"(",
"image",
".",
"get_slice",
"(",
"SliceType",
".",
"AXIAL",
",",
"new_image",
".",
"shape",
"[",
"2",
"]",
"-",
"1",
"-",
"i",
")",
".",
"original_data",
")",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"new_image",
".",
"shape",
"[",
"2",
"]",
")",
":",
"new_image",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"numpy",
".",
"fliplr",
"(",
"numpy",
".",
"squeeze",
"(",
"image",
".",
"get_slice",
"(",
"SliceType",
".",
"AXIAL",
",",
"i",
")",
".",
"original_data",
")",
")",
"return",
"new_image"
]
| Reorganize the data for a 3d nifti | [
"Reorganize",
"the",
"data",
"for",
"a",
"3d",
"nifti"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/image_reorientation.py#L112-L133 |
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | dicom_to_nifti | def dicom_to_nifti(dicom_input, output_file=None):
"""
This is the main dicom to nifti conversion function for philips images.
As input philips images are required. It will then determine the type of images and do the correct conversion
Examples: See unit test
:param output_file: file path to the output nifti
:param dicom_input: directory with dicom files for 1 scan
"""
assert common.is_philips(dicom_input)
if common.is_multiframe_dicom(dicom_input):
_assert_explicit_vr(dicom_input)
logger.info('Found multiframe dicom')
if _is_multiframe_4d(dicom_input):
logger.info('Found sequence type: MULTIFRAME 4D')
return _multiframe_to_nifti(dicom_input, output_file)
if _is_multiframe_anatomical(dicom_input):
logger.info('Found sequence type: MULTIFRAME ANATOMICAL')
return _multiframe_to_nifti(dicom_input, output_file)
else:
logger.info('Found singleframe dicom')
grouped_dicoms = _get_grouped_dicoms(dicom_input)
if _is_singleframe_4d(dicom_input):
logger.info('Found sequence type: SINGLEFRAME 4D')
return _singleframe_to_nifti(grouped_dicoms, output_file)
logger.info('Assuming anatomical data')
return convert_generic.dicom_to_nifti(dicom_input, output_file) | python | def dicom_to_nifti(dicom_input, output_file=None):
assert common.is_philips(dicom_input)
if common.is_multiframe_dicom(dicom_input):
_assert_explicit_vr(dicom_input)
logger.info('Found multiframe dicom')
if _is_multiframe_4d(dicom_input):
logger.info('Found sequence type: MULTIFRAME 4D')
return _multiframe_to_nifti(dicom_input, output_file)
if _is_multiframe_anatomical(dicom_input):
logger.info('Found sequence type: MULTIFRAME ANATOMICAL')
return _multiframe_to_nifti(dicom_input, output_file)
else:
logger.info('Found singleframe dicom')
grouped_dicoms = _get_grouped_dicoms(dicom_input)
if _is_singleframe_4d(dicom_input):
logger.info('Found sequence type: SINGLEFRAME 4D')
return _singleframe_to_nifti(grouped_dicoms, output_file)
logger.info('Assuming anatomical data')
return convert_generic.dicom_to_nifti(dicom_input, output_file) | [
"def",
"dicom_to_nifti",
"(",
"dicom_input",
",",
"output_file",
"=",
"None",
")",
":",
"assert",
"common",
".",
"is_philips",
"(",
"dicom_input",
")",
"if",
"common",
".",
"is_multiframe_dicom",
"(",
"dicom_input",
")",
":",
"_assert_explicit_vr",
"(",
"dicom_input",
")",
"logger",
".",
"info",
"(",
"'Found multiframe dicom'",
")",
"if",
"_is_multiframe_4d",
"(",
"dicom_input",
")",
":",
"logger",
".",
"info",
"(",
"'Found sequence type: MULTIFRAME 4D'",
")",
"return",
"_multiframe_to_nifti",
"(",
"dicom_input",
",",
"output_file",
")",
"if",
"_is_multiframe_anatomical",
"(",
"dicom_input",
")",
":",
"logger",
".",
"info",
"(",
"'Found sequence type: MULTIFRAME ANATOMICAL'",
")",
"return",
"_multiframe_to_nifti",
"(",
"dicom_input",
",",
"output_file",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Found singleframe dicom'",
")",
"grouped_dicoms",
"=",
"_get_grouped_dicoms",
"(",
"dicom_input",
")",
"if",
"_is_singleframe_4d",
"(",
"dicom_input",
")",
":",
"logger",
".",
"info",
"(",
"'Found sequence type: SINGLEFRAME 4D'",
")",
"return",
"_singleframe_to_nifti",
"(",
"grouped_dicoms",
",",
"output_file",
")",
"logger",
".",
"info",
"(",
"'Assuming anatomical data'",
")",
"return",
"convert_generic",
".",
"dicom_to_nifti",
"(",
"dicom_input",
",",
"output_file",
")"
]
| This is the main dicom to nifti conversion function for philips images.
As input philips images are required. It will then determine the type of images and do the correct conversion
Examples: See unit test
:param output_file: file path to the output nifti
:param dicom_input: directory with dicom files for 1 scan | [
"This",
"is",
"the",
"main",
"dicom",
"to",
"nifti",
"conversion",
"fuction",
"for",
"philips",
"images",
".",
"As",
"input",
"philips",
"images",
"are",
"required",
".",
"It",
"will",
"then",
"determine",
"the",
"type",
"of",
"images",
"and",
"do",
"the",
"correct",
"conversion"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L31-L62 |
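
The dispatch logic above reduces to two nested decisions; a schematic stand-in that returns a label instead of converting (the boolean arguments replace the real header checks):

def pick_conversion(is_multiframe, is_4d, is_anatomical_multiframe=False):
    if is_multiframe:
        if is_4d:
            return 'MULTIFRAME 4D'
        if is_anatomical_multiframe:
            return 'MULTIFRAME ANATOMICAL'
    elif is_4d:
        return 'SINGLEFRAME 4D'
    # anything else falls through to the generic anatomical converter
    return 'GENERIC'

print(pick_conversion(is_multiframe=False, is_4d=True))  # prints: SINGLEFRAME 4D
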
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _assert_explicit_vr | def _assert_explicit_vr(dicom_input):
"""
Assert that explicit vr is used
"""
if settings.validate_multiframe_implicit:
header = dicom_input[0]
if header.file_meta[0x0002, 0x0010].value == '1.2.840.10008.1.2':
raise ConversionError('IMPLICIT_VR_ENHANCED_DICOM') | python | def _assert_explicit_vr(dicom_input):
if settings.validate_multiframe_implicit:
header = dicom_input[0]
if header.file_meta[0x0002, 0x0010].value == '1.2.840.10008.1.2':
raise ConversionError('IMPLICIT_VR_ENHANCED_DICOM') | [
"def",
"_assert_explicit_vr",
"(",
"dicom_input",
")",
":",
"if",
"settings",
".",
"validate_multiframe_implicit",
":",
"header",
"=",
"dicom_input",
"[",
"0",
"]",
"if",
"header",
".",
"file_meta",
"[",
"0x0002",
",",
"0x0010",
"]",
".",
"value",
"==",
"'1.2.840.10008.1.2'",
":",
"raise",
"ConversionError",
"(",
"'IMPLICIT_VR_ENHANCED_DICOM'",
")"
]
| Assert that explicit vr is used | [
"Assert",
"that",
"explicit",
"vr",
"is",
"used"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L65-L72 |
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _is_multiframe_diffusion_imaging | def _is_multiframe_diffusion_imaging(dicom_input):
"""
Use this function to detect if a dicom series is a philips multiframe dti dataset
NOTE: We already assume this is a 4D dataset as input
"""
header = dicom_input[0]
if "PerFrameFunctionalGroupsSequence" not in header:
return False
# check if there is diffusion info in the frame
found_diffusion = False
diffusion_tag = Tag(0x0018, 0x9117)
for frame in header.PerFrameFunctionalGroupsSequence:
if diffusion_tag in frame:
found_diffusion = True
break
if not found_diffusion:
return False
return True | python | def _is_multiframe_diffusion_imaging(dicom_input):
header = dicom_input[0]
if "PerFrameFunctionalGroupsSequence" not in header:
return False
found_diffusion = False
diffusion_tag = Tag(0x0018, 0x9117)
for frame in header.PerFrameFunctionalGroupsSequence:
if diffusion_tag in frame:
found_diffusion = True
break
if not found_diffusion:
return False
return True | [
"def",
"_is_multiframe_diffusion_imaging",
"(",
"dicom_input",
")",
":",
"header",
"=",
"dicom_input",
"[",
"0",
"]",
"if",
"\"PerFrameFunctionalGroupsSequence\"",
"not",
"in",
"header",
":",
"return",
"False",
"# check if there is diffusion info in the frame",
"found_diffusion",
"=",
"False",
"diffusion_tag",
"=",
"Tag",
"(",
"0x0018",
",",
"0x9117",
")",
"for",
"frame",
"in",
"header",
".",
"PerFrameFunctionalGroupsSequence",
":",
"if",
"diffusion_tag",
"in",
"frame",
":",
"found_diffusion",
"=",
"True",
"break",
"if",
"not",
"found_diffusion",
":",
"return",
"False",
"return",
"True"
]
| Use this function to detect if a dicom series is a philips multiframe dti dataset
NOTE: We already assume this is a 4D dataset as input | [
"Use",
"this",
"function",
"to",
"detect",
"if",
"a",
"dicom",
"series",
"is",
"a",
"philips",
"multiframe",
"dti",
"dataset",
"NOTE",
":",
"We",
"already",
"assue",
"this",
"is",
"a",
"4D",
"dataset",
"as",
"input"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L75-L95 |
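
The per-frame scan above is simply "does any frame contain the MR Diffusion Sequence tag (0018,9117)"; a sketch with pydicom Datasets built in memory (the frame contents are invented for illustration):

from pydicom.dataset import Dataset
from pydicom.tag import Tag

def any_frame_has_tag(frames, group, element):
    tag = Tag(group, element)
    for frame in frames:
        if tag in frame:
            return True
    return False

plain = Dataset()
diffusion = Dataset()
diffusion.add_new(Tag(0x0018, 0x9117), 'SQ', [])  # empty MR Diffusion Sequence
print(any_frame_has_tag([plain, diffusion], 0x0018, 0x9117))  # prints: True
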
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _is_multiframe_4d | def _is_multiframe_4d(dicom_input):
"""
Use this function to detect if a dicom series is a philips multiframe 4D dataset
"""
# check if it is multi frame dicom
if not common.is_multiframe_dicom(dicom_input):
return False
header = dicom_input[0]
# check if there are multiple stacks
number_of_stack_slices = common.get_ss_value(header[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
number_of_stacks = int(int(header.NumberOfFrames) / number_of_stack_slices)
if number_of_stacks <= 1:
return False
return True | python | def _is_multiframe_4d(dicom_input):
if not common.is_multiframe_dicom(dicom_input):
return False
header = dicom_input[0]
number_of_stack_slices = common.get_ss_value(header[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
number_of_stacks = int(int(header.NumberOfFrames) / number_of_stack_slices)
if number_of_stacks <= 1:
return False
return True | [
"def",
"_is_multiframe_4d",
"(",
"dicom_input",
")",
":",
"# check if it is multi frame dicom",
"if",
"not",
"common",
".",
"is_multiframe_dicom",
"(",
"dicom_input",
")",
":",
"return",
"False",
"header",
"=",
"dicom_input",
"[",
"0",
"]",
"# check if there are multiple stacks",
"number_of_stack_slices",
"=",
"common",
".",
"get_ss_value",
"(",
"header",
"[",
"Tag",
"(",
"0x2001",
",",
"0x105f",
")",
"]",
"[",
"0",
"]",
"[",
"Tag",
"(",
"0x2001",
",",
"0x102d",
")",
"]",
")",
"number_of_stacks",
"=",
"int",
"(",
"int",
"(",
"header",
".",
"NumberOfFrames",
")",
"/",
"number_of_stack_slices",
")",
"if",
"number_of_stacks",
"<=",
"1",
":",
"return",
"False",
"return",
"True"
]
| Use this function to detect if a dicom series is a philips multiframe 4D dataset | [
"Use",
"this",
"function",
"to",
"detect",
"if",
"a",
"dicom",
"series",
"is",
"a",
"philips",
"multiframe",
"4D",
"dataset"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L98-L114 |
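
The stack arithmetic behind the 4D check is a single division: NumberOfFrames split by the private slices-per-stack value gives the number of stacked volumes, and more than one volume implies a 4D series. Worked numbers (invented):

def looks_like_4d(number_of_frames, slices_per_stack):
    number_of_stacks = number_of_frames // slices_per_stack
    # a single stack is plain anatomical data, several stacks are timepoints/volumes
    return number_of_stacks > 1

print(looks_like_4d(120, 30))  # prints: True  (4 volumes of 30 slices)
print(looks_like_4d(30, 30))   # prints: False (one anatomical stack)
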
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _is_singleframe_4d | def _is_singleframe_4d(dicom_input):
"""
Use this function to detect if a dicom series is a philips singleframe 4D dataset
"""
header = dicom_input[0]
# check if there is stack information
slice_number_mr_tag = Tag(0x2001, 0x100a)
if slice_number_mr_tag not in header:
return False
# check if there are multiple timepoints
grouped_dicoms = _get_grouped_dicoms(dicom_input)
if len(grouped_dicoms) <= 1:
return False
return True | python | def _is_singleframe_4d(dicom_input):
header = dicom_input[0]
slice_number_mr_tag = Tag(0x2001, 0x100a)
if slice_number_mr_tag not in header:
return False
grouped_dicoms = _get_grouped_dicoms(dicom_input)
if len(grouped_dicoms) <= 1:
return False
return True | [
"def",
"_is_singleframe_4d",
"(",
"dicom_input",
")",
":",
"header",
"=",
"dicom_input",
"[",
"0",
"]",
"# check if there are stack information",
"slice_number_mr_tag",
"=",
"Tag",
"(",
"0x2001",
",",
"0x100a",
")",
"if",
"slice_number_mr_tag",
"not",
"in",
"header",
":",
"return",
"False",
"# check if there are multiple timepoints",
"grouped_dicoms",
"=",
"_get_grouped_dicoms",
"(",
"dicom_input",
")",
"if",
"len",
"(",
"grouped_dicoms",
")",
"<=",
"1",
":",
"return",
"False",
"return",
"True"
]
| Use this function to detect if a dicom series is a philips singleframe 4D dataset | [
"Use",
"this",
"function",
"to",
"detect",
"if",
"a",
"dicom",
"series",
"is",
"a",
"philips",
"singleframe",
"4D",
"dataset"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L139-L155 |
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _is_bval_type_a | def _is_bval_type_a(grouped_dicoms):
"""
Check if the bvals are stored in the first of 2 currently known ways for single frame dti
"""
bval_tag = Tag(0x2001, 0x1003)
bvec_x_tag = Tag(0x2005, 0x10b0)
bvec_y_tag = Tag(0x2005, 0x10b1)
bvec_z_tag = Tag(0x2005, 0x10b2)
for group in grouped_dicoms:
if bvec_x_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_x_tag])) and \
bvec_y_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_y_tag])) and \
bvec_z_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_z_tag])) and \
bval_tag in group[0] and _is_float(common.get_fl_value(group[0][bval_tag])) and \
common.get_fl_value(group[0][bval_tag]) != 0:
return True
return False | python | def _is_bval_type_a(grouped_dicoms):
bval_tag = Tag(0x2001, 0x1003)
bvec_x_tag = Tag(0x2005, 0x10b0)
bvec_y_tag = Tag(0x2005, 0x10b1)
bvec_z_tag = Tag(0x2005, 0x10b2)
for group in grouped_dicoms:
if bvec_x_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_x_tag])) and \
bvec_y_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_y_tag])) and \
bvec_z_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_z_tag])) and \
bval_tag in group[0] and _is_float(common.get_fl_value(group[0][bval_tag])) and \
common.get_fl_value(group[0][bval_tag]) != 0:
return True
return False | [
"def",
"_is_bval_type_a",
"(",
"grouped_dicoms",
")",
":",
"bval_tag",
"=",
"Tag",
"(",
"0x2001",
",",
"0x1003",
")",
"bvec_x_tag",
"=",
"Tag",
"(",
"0x2005",
",",
"0x10b0",
")",
"bvec_y_tag",
"=",
"Tag",
"(",
"0x2005",
",",
"0x10b1",
")",
"bvec_z_tag",
"=",
"Tag",
"(",
"0x2005",
",",
"0x10b2",
")",
"for",
"group",
"in",
"grouped_dicoms",
":",
"if",
"bvec_x_tag",
"in",
"group",
"[",
"0",
"]",
"and",
"_is_float",
"(",
"common",
".",
"get_fl_value",
"(",
"group",
"[",
"0",
"]",
"[",
"bvec_x_tag",
"]",
")",
")",
"and",
"bvec_y_tag",
"in",
"group",
"[",
"0",
"]",
"and",
"_is_float",
"(",
"common",
".",
"get_fl_value",
"(",
"group",
"[",
"0",
"]",
"[",
"bvec_y_tag",
"]",
")",
")",
"and",
"bvec_z_tag",
"in",
"group",
"[",
"0",
"]",
"and",
"_is_float",
"(",
"common",
".",
"get_fl_value",
"(",
"group",
"[",
"0",
"]",
"[",
"bvec_z_tag",
"]",
")",
")",
"and",
"bval_tag",
"in",
"group",
"[",
"0",
"]",
"and",
"_is_float",
"(",
"common",
".",
"get_fl_value",
"(",
"group",
"[",
"0",
"]",
"[",
"bval_tag",
"]",
")",
")",
"and",
"common",
".",
"get_fl_value",
"(",
"group",
"[",
"0",
"]",
"[",
"bval_tag",
"]",
")",
"!=",
"0",
":",
"return",
"True",
"return",
"False"
]
| Check if the bvals are stored in the first of 2 currently known ways for single frame dti | [
"Check",
"if",
"the",
"bvals",
"are",
"stored",
"in",
"the",
"first",
"of",
"2",
"currently",
"known",
"ways",
"for",
"single",
"frame",
"dti"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L172-L187 |
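
Stripped of the private Philips tags, the type-A test asks whether at least one volume carries a parseable, non-zero b-value plus three parseable gradient components. A sketch over plain dicts (the keys stand in for the 0x2001/0x2005 private tags, which are not reproduced here):

def _is_float(value):
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False

def has_typed_bvals(grouped_headers):
    for group in grouped_headers:
        header = group[0]
        bval = header.get('bval')
        bvec = header.get('bvec', (None, None, None))
        if _is_float(bval) and float(bval) != 0 and all(_is_float(c) for c in bvec):
            # one qualifying volume is enough to call the series diffusion-weighted
            return True
    return False

groups = [[{'bval': 0, 'bvec': (0, 0, 0)}],
          [{'bval': 1000, 'bvec': (0.577, 0.577, 0.577)}]]
print(has_typed_bvals(groups))  # prints: True
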
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _is_bval_type_b | def _is_bval_type_b(grouped_dicoms):
"""
Check if the bvals are stored in the second of 2 currently known ways for single frame dti
"""
bval_tag = Tag(0x0018, 0x9087)
bvec_tag = Tag(0x0018, 0x9089)
for group in grouped_dicoms:
if bvec_tag in group[0] and bval_tag in group[0]:
bvec = common.get_fd_array_value(group[0][bvec_tag], 3)
bval = common.get_fd_value(group[0][bval_tag])
if _is_float(bvec[0]) and _is_float(bvec[1]) and _is_float(bvec[2]) and _is_float(bval) and bval != 0:
return True
return False | python | def _is_bval_type_b(grouped_dicoms):
bval_tag = Tag(0x0018, 0x9087)
bvec_tag = Tag(0x0018, 0x9089)
for group in grouped_dicoms:
if bvec_tag in group[0] and bval_tag in group[0]:
bvec = common.get_fd_array_value(group[0][bvec_tag], 3)
bval = common.get_fd_value(group[0][bval_tag])
if _is_float(bvec[0]) and _is_float(bvec[1]) and _is_float(bvec[2]) and _is_float(bval) and bval != 0:
return True
return False | [
"def",
"_is_bval_type_b",
"(",
"grouped_dicoms",
")",
":",
"bval_tag",
"=",
"Tag",
"(",
"0x0018",
",",
"0x9087",
")",
"bvec_tag",
"=",
"Tag",
"(",
"0x0018",
",",
"0x9089",
")",
"for",
"group",
"in",
"grouped_dicoms",
":",
"if",
"bvec_tag",
"in",
"group",
"[",
"0",
"]",
"and",
"bval_tag",
"in",
"group",
"[",
"0",
"]",
":",
"bvec",
"=",
"common",
".",
"get_fd_array_value",
"(",
"group",
"[",
"0",
"]",
"[",
"bvec_tag",
"]",
",",
"3",
")",
"bval",
"=",
"common",
".",
"get_fd_value",
"(",
"group",
"[",
"0",
"]",
"[",
"bval_tag",
"]",
")",
"if",
"_is_float",
"(",
"bvec",
"[",
"0",
"]",
")",
"and",
"_is_float",
"(",
"bvec",
"[",
"1",
"]",
")",
"and",
"_is_float",
"(",
"bvec",
"[",
"2",
"]",
")",
"and",
"_is_float",
"(",
"bval",
")",
"and",
"bval",
"!=",
"0",
":",
"return",
"True",
"return",
"False"
]
| Check if the bvals are stored in the second of 2 currently known ways for single frame dti | [
"Check",
"if",
"the",
"bvals",
"are",
"stored",
"in",
"the",
"second",
"of",
"2",
"currently",
"known",
"ways",
"for",
"single",
"frame",
"dti"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L190-L202 |
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _multiframe_to_nifti | def _multiframe_to_nifti(dicom_input, output_file):
"""
This function will convert philips 4D or anatomical multiframe series to a nifti
"""
# Read the multiframe dicom file
logger.info('Read dicom file')
multiframe_dicom = dicom_input[0]
# Create the data block
logger.info('Creating data block')
full_block = _multiframe_to_block(multiframe_dicom)
logger.info('Creating affine')
# Create the nifti header info
affine = _create_affine_multiframe(multiframe_dicom)
logger.info('Creating nifti')
# Convert to nifti
nii_image = nibabel.Nifti1Image(full_block, affine)
timing_parameters = multiframe_dicom.SharedFunctionalGroupsSequence[0].MRTimingAndRelatedParametersSequence[0]
first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0]
common.set_tr_te(nii_image, float(timing_parameters.RepetitionTime),
float(first_frame[0x2005, 0x140f][0].EchoTime))
# Save to disk
if output_file is not None:
logger.info('Saving nifti to disk %s' % output_file)
nii_image.to_filename(output_file)
if _is_multiframe_diffusion_imaging(dicom_input):
bval_file = None
bvec_file = None
if output_file is not None:
# Create the bval and bvec files
base_path = os.path.dirname(output_file)
base_name = os.path.splitext(os.path.splitext(os.path.basename(output_file))[0])[0]
logger.info('Creating bval and bvec files')
bval_file = '%s/%s.bval' % (base_path, base_name)
bvec_file = '%s/%s.bvec' % (base_path, base_name)
bval, bvec, bval_file, bvec_file = _create_bvals_bvecs(multiframe_dicom, bval_file, bvec_file, nii_image,
output_file)
return {'NII_FILE': output_file,
'BVAL_FILE': bval_file,
'BVEC_FILE': bvec_file,
'NII': nii_image,
'BVAL': bval,
'BVEC': bvec}
return {'NII_FILE': output_file,
'NII': nii_image} | python | def _multiframe_to_nifti(dicom_input, output_file):
logger.info('Read dicom file')
multiframe_dicom = dicom_input[0]
logger.info('Creating data block')
full_block = _multiframe_to_block(multiframe_dicom)
logger.info('Creating affine')
affine = _create_affine_multiframe(multiframe_dicom)
logger.info('Creating nifti')
nii_image = nibabel.Nifti1Image(full_block, affine)
timing_parameters = multiframe_dicom.SharedFunctionalGroupsSequence[0].MRTimingAndRelatedParametersSequence[0]
first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0]
common.set_tr_te(nii_image, float(timing_parameters.RepetitionTime),
float(first_frame[0x2005, 0x140f][0].EchoTime))
if output_file is not None:
logger.info('Saving nifti to disk %s' % output_file)
nii_image.to_filename(output_file)
if _is_multiframe_diffusion_imaging(dicom_input):
bval_file = None
bvec_file = None
if output_file is not None:
base_path = os.path.dirname(output_file)
base_name = os.path.splitext(os.path.splitext(os.path.basename(output_file))[0])[0]
logger.info('Creating bval and bvec files')
bval_file = '%s/%s.bval' % (base_path, base_name)
bvec_file = '%s/%s.bvec' % (base_path, base_name)
bval, bvec, bval_file, bvec_file = _create_bvals_bvecs(multiframe_dicom, bval_file, bvec_file, nii_image,
output_file)
return {'NII_FILE': output_file,
'BVAL_FILE': bval_file,
'BVEC_FILE': bvec_file,
'NII': nii_image,
'BVAL': bval,
'BVEC': bvec}
return {'NII_FILE': output_file,
'NII': nii_image} | [
"def",
"_multiframe_to_nifti",
"(",
"dicom_input",
",",
"output_file",
")",
":",
"# Read the multiframe dicom file",
"logger",
".",
"info",
"(",
"'Read dicom file'",
")",
"multiframe_dicom",
"=",
"dicom_input",
"[",
"0",
"]",
"# Create mosaic block",
"logger",
".",
"info",
"(",
"'Creating data block'",
")",
"full_block",
"=",
"_multiframe_to_block",
"(",
"multiframe_dicom",
")",
"logger",
".",
"info",
"(",
"'Creating affine'",
")",
"# Create the nifti header info",
"affine",
"=",
"_create_affine_multiframe",
"(",
"multiframe_dicom",
")",
"logger",
".",
"info",
"(",
"'Creating nifti'",
")",
"# Convert to nifti",
"nii_image",
"=",
"nibabel",
".",
"Nifti1Image",
"(",
"full_block",
",",
"affine",
")",
"timing_parameters",
"=",
"multiframe_dicom",
".",
"SharedFunctionalGroupsSequence",
"[",
"0",
"]",
".",
"MRTimingAndRelatedParametersSequence",
"[",
"0",
"]",
"first_frame",
"=",
"multiframe_dicom",
"[",
"Tag",
"(",
"0x5200",
",",
"0x9230",
")",
"]",
"[",
"0",
"]",
"common",
".",
"set_tr_te",
"(",
"nii_image",
",",
"float",
"(",
"timing_parameters",
".",
"RepetitionTime",
")",
",",
"float",
"(",
"first_frame",
"[",
"0x2005",
",",
"0x140f",
"]",
"[",
"0",
"]",
".",
"EchoTime",
")",
")",
"# Save to disk",
"if",
"output_file",
"is",
"not",
"None",
":",
"logger",
".",
"info",
"(",
"'Saving nifti to disk %s'",
"%",
"output_file",
")",
"nii_image",
".",
"to_filename",
"(",
"output_file",
")",
"if",
"_is_multiframe_diffusion_imaging",
"(",
"dicom_input",
")",
":",
"bval_file",
"=",
"None",
"bvec_file",
"=",
"None",
"if",
"output_file",
"is",
"not",
"None",
":",
"# Create the bval en bvec files",
"base_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"output_file",
")",
"base_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"output_file",
")",
")",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"logger",
".",
"info",
"(",
"'Creating bval en bvec files'",
")",
"bval_file",
"=",
"'%s/%s.bval'",
"%",
"(",
"base_path",
",",
"base_name",
")",
"bvec_file",
"=",
"'%s/%s.bvec'",
"%",
"(",
"base_path",
",",
"base_name",
")",
"bval",
",",
"bvec",
",",
"bval_file",
",",
"bvec_file",
"=",
"_create_bvals_bvecs",
"(",
"multiframe_dicom",
",",
"bval_file",
",",
"bvec_file",
",",
"nii_image",
",",
"output_file",
")",
"return",
"{",
"'NII_FILE'",
":",
"output_file",
",",
"'BVAL_FILE'",
":",
"bval_file",
",",
"'BVEC_FILE'",
":",
"bvec_file",
",",
"'NII'",
":",
"nii_image",
",",
"'BVAL'",
":",
"bval",
",",
"'BVEC'",
":",
"bvec",
"}",
"return",
"{",
"'NII_FILE'",
":",
"output_file",
",",
"'NII'",
":",
"nii_image",
"}"
]
| This function will convert philips 4D or anatomical multiframe series to a nifti | [
"This",
"function",
"will",
"convert",
"philips",
"4D",
"or",
"anatomical",
"multiframe",
"series",
"to",
"a",
"nifti"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L216-L268 |
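
The bval/bvec sidecar naming above strips up to two extensions so that both scan.nii and scan.nii.gz reduce to the same base name; a small sketch of just that path derivation:

import os

def sidecar_paths(output_file):
    base_path = os.path.dirname(output_file)
    # apply splitext twice so 'scan.nii.gz' reduces to 'scan'
    base_name = os.path.splitext(os.path.splitext(os.path.basename(output_file))[0])[0]
    return ('%s/%s.bval' % (base_path, base_name),
            '%s/%s.bvec' % (base_path, base_name))

print(sidecar_paths('/tmp/scan.nii.gz'))  # prints: ('/tmp/scan.bval', '/tmp/scan.bvec')
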
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _singleframe_to_nifti | def _singleframe_to_nifti(grouped_dicoms, output_file):
"""
This function will convert a philips singleframe series to a nifti
"""
# Create the data block
logger.info('Creating data block')
full_block = _singleframe_to_block(grouped_dicoms)
logger.info('Creating affine')
# Create the nifti header info
affine, slice_increment = common.create_affine(grouped_dicoms[0])
logger.info('Creating nifti')
# Convert to nifti
nii_image = nibabel.Nifti1Image(full_block, affine)
common.set_tr_te(nii_image, float(grouped_dicoms[0][0].RepetitionTime), float(grouped_dicoms[0][0].EchoTime))
if output_file is not None:
# Save to disk
logger.info('Saving nifti to disk %s' % output_file)
nii_image.to_filename(output_file)
if _is_singleframe_diffusion_imaging(grouped_dicoms):
bval_file = None
bvec_file = None
# Create the bval and bvec files
if output_file is not None:
base_name = os.path.splitext(output_file)[0]
if base_name.endswith('.nii'):
base_name = os.path.splitext(base_name)[0]
logger.info('Creating bval and bvec files')
bval_file = '%s.bval' % base_name
bvec_file = '%s.bvec' % base_name
nii_image, bval, bvec, bval_file, bvec_file = _create_singleframe_bvals_bvecs(grouped_dicoms,
bval_file,
bvec_file,
nii_image,
output_file)
return {'NII_FILE': output_file,
'BVAL_FILE': bval_file,
'BVEC_FILE': bvec_file,
'NII': nii_image,
'BVAL': bval,
'BVEC': bvec,
'MAX_SLICE_INCREMENT': slice_increment}
return {'NII_FILE': output_file,
'NII': nii_image,
'MAX_SLICE_INCREMENT': slice_increment} | python | def _singleframe_to_nifti(grouped_dicoms, output_file):
logger.info('Creating data block')
full_block = _singleframe_to_block(grouped_dicoms)
logger.info('Creating affine')
affine, slice_increment = common.create_affine(grouped_dicoms[0])
logger.info('Creating nifti')
nii_image = nibabel.Nifti1Image(full_block, affine)
common.set_tr_te(nii_image, float(grouped_dicoms[0][0].RepetitionTime), float(grouped_dicoms[0][0].EchoTime))
if output_file is not None:
logger.info('Saving nifti to disk %s' % output_file)
nii_image.to_filename(output_file)
if _is_singleframe_diffusion_imaging(grouped_dicoms):
bval_file = None
bvec_file = None
if output_file is not None:
base_name = os.path.splitext(output_file)[0]
if base_name.endswith('.nii'):
base_name = os.path.splitext(base_name)[0]
logger.info('Creating bval and bvec files')
bval_file = '%s.bval' % base_name
bvec_file = '%s.bvec' % base_name
nii_image, bval, bvec, bval_file, bvec_file = _create_singleframe_bvals_bvecs(grouped_dicoms,
bval_file,
bvec_file,
nii_image,
output_file)
return {'NII_FILE': output_file,
'BVAL_FILE': bval_file,
'BVEC_FILE': bvec_file,
'NII': nii_image,
'BVAL': bval,
'BVEC': bvec,
'MAX_SLICE_INCREMENT': slice_increment}
return {'NII_FILE': output_file,
'NII': nii_image,
'MAX_SLICE_INCREMENT': slice_increment} | [
"def",
"_singleframe_to_nifti",
"(",
"grouped_dicoms",
",",
"output_file",
")",
":",
"# Create mosaic block",
"logger",
".",
"info",
"(",
"'Creating data block'",
")",
"full_block",
"=",
"_singleframe_to_block",
"(",
"grouped_dicoms",
")",
"logger",
".",
"info",
"(",
"'Creating affine'",
")",
"# Create the nifti header info",
"affine",
",",
"slice_increment",
"=",
"common",
".",
"create_affine",
"(",
"grouped_dicoms",
"[",
"0",
"]",
")",
"logger",
".",
"info",
"(",
"'Creating nifti'",
")",
"# Convert to nifti",
"nii_image",
"=",
"nibabel",
".",
"Nifti1Image",
"(",
"full_block",
",",
"affine",
")",
"common",
".",
"set_tr_te",
"(",
"nii_image",
",",
"float",
"(",
"grouped_dicoms",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"RepetitionTime",
")",
",",
"float",
"(",
"grouped_dicoms",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"EchoTime",
")",
")",
"if",
"output_file",
"is",
"not",
"None",
":",
"# Save to disk",
"logger",
".",
"info",
"(",
"'Saving nifti to disk %s'",
"%",
"output_file",
")",
"nii_image",
".",
"to_filename",
"(",
"output_file",
")",
"if",
"_is_singleframe_diffusion_imaging",
"(",
"grouped_dicoms",
")",
":",
"bval_file",
"=",
"None",
"bvec_file",
"=",
"None",
"# Create the bval en bvec files",
"if",
"output_file",
"is",
"not",
"None",
":",
"base_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"output_file",
")",
"[",
"0",
"]",
"if",
"base_name",
".",
"endswith",
"(",
"'.nii'",
")",
":",
"base_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"base_name",
")",
"[",
"0",
"]",
"logger",
".",
"info",
"(",
"'Creating bval en bvec files'",
")",
"bval_file",
"=",
"'%s.bval'",
"%",
"base_name",
"bvec_file",
"=",
"'%s.bvec'",
"%",
"base_name",
"nii_image",
",",
"bval",
",",
"bvec",
",",
"bval_file",
",",
"bvec_file",
"=",
"_create_singleframe_bvals_bvecs",
"(",
"grouped_dicoms",
",",
"bval_file",
",",
"bvec_file",
",",
"nii_image",
",",
"output_file",
")",
"return",
"{",
"'NII_FILE'",
":",
"output_file",
",",
"'BVAL_FILE'",
":",
"bval_file",
",",
"'BVEC_FILE'",
":",
"bvec_file",
",",
"'NII'",
":",
"nii_image",
",",
"'BVAL'",
":",
"bval",
",",
"'BVEC'",
":",
"bvec",
",",
"'MAX_SLICE_INCREMENT'",
":",
"slice_increment",
"}",
"return",
"{",
"'NII_FILE'",
":",
"output_file",
",",
"'NII'",
":",
"nii_image",
",",
"'MAX_SLICE_INCREMENT'",
":",
"slice_increment",
"}"
]
| This function will convert a philips singleframe series to a nifti | [
"This",
"function",
"will",
"convert",
"a",
"philips",
"singleframe",
"series",
"to",
"a",
"nifti"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L271-L322 |
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _singleframe_to_block | def _singleframe_to_block(grouped_dicoms):
"""
Generate a full datablock containing all timepoints
"""
# For each timepoint create a data volume block
data_blocks = []
for index in range(0, len(grouped_dicoms)):
logger.info('Creating block %s of %s' % (index + 1, len(grouped_dicoms)))
current_block = _stack_to_block(grouped_dicoms[index])
current_block = current_block[:, :, :, numpy.newaxis]
data_blocks.append(current_block)
try:
full_block = numpy.concatenate(data_blocks, axis=3)
except:
traceback.print_exc()
raise ConversionError("MISSING_DICOM_FILES")
# Apply the rescaling if needed
common.apply_scaling(full_block, grouped_dicoms[0][0])
return full_block | python | def _singleframe_to_block(grouped_dicoms):
data_blocks = []
for index in range(0, len(grouped_dicoms)):
logger.info('Creating block %s of %s' % (index + 1, len(grouped_dicoms)))
current_block = _stack_to_block(grouped_dicoms[index])
current_block = current_block[:, :, :, numpy.newaxis]
data_blocks.append(current_block)
try:
full_block = numpy.concatenate(data_blocks, axis=3)
except:
traceback.print_exc()
raise ConversionError("MISSING_DICOM_FILES")
common.apply_scaling(full_block, grouped_dicoms[0][0])
return full_block | [
"def",
"_singleframe_to_block",
"(",
"grouped_dicoms",
")",
":",
"# For each slice / mosaic create a data volume block",
"data_blocks",
"=",
"[",
"]",
"for",
"index",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"grouped_dicoms",
")",
")",
":",
"logger",
".",
"info",
"(",
"'Creating block %s of %s'",
"%",
"(",
"index",
"+",
"1",
",",
"len",
"(",
"grouped_dicoms",
")",
")",
")",
"current_block",
"=",
"_stack_to_block",
"(",
"grouped_dicoms",
"[",
"index",
"]",
")",
"current_block",
"=",
"current_block",
"[",
":",
",",
":",
",",
":",
",",
"numpy",
".",
"newaxis",
"]",
"data_blocks",
".",
"append",
"(",
"current_block",
")",
"try",
":",
"full_block",
"=",
"numpy",
".",
"concatenate",
"(",
"data_blocks",
",",
"axis",
"=",
"3",
")",
"except",
":",
"traceback",
".",
"print_exc",
"(",
")",
"raise",
"ConversionError",
"(",
"\"MISSING_DICOM_FILES\"",
")",
"# Apply the rescaling if needed",
"common",
".",
"apply_scaling",
"(",
"full_block",
",",
"grouped_dicoms",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"return",
"full_block"
]
| Generate a full datablock containing all timepoints | [
"Generate",
"a",
"full",
"datablock",
"containing",
"all",
"timepoints"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L325-L346 |
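
Stacking the per-timepoint 3D blocks into one 4D array is a trailing-axis concatenation; shape mismatches (a typical symptom of missing dicom files) surface as a ValueError from numpy.concatenate. A runnable miniature:

import numpy

def stack_timepoints(blocks):
    # give each 3D block a trailing time axis, then join along it
    expanded = [block[:, :, :, numpy.newaxis] for block in blocks]
    try:
        return numpy.concatenate(expanded, axis=3)
    except ValueError:
        # differing block shapes usually mean an incomplete series
        raise RuntimeError('MISSING_DICOM_FILES')

volumes = [numpy.zeros((4, 4, 3)) for _ in range(5)]
print(stack_timepoints(volumes).shape)  # prints: (4, 4, 3, 5)
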
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _get_grouped_dicoms | def _get_grouped_dicoms(dicom_input):
"""
Search all dicoms in the dicom directory, sort and validate them
"""
# if all dicoms have an instance number try sorting by instance number else by position
if [d for d in dicom_input if 'InstanceNumber' in d]:
dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)
else:
dicoms = common.sort_dicoms(dicom_input)
# now group per stack
grouped_dicoms = [[]] # list with first element a list
timepoint_index = 0
previous_stack_position = -1
# loop over all sorted dicoms
stack_position_tag = Tag(0x2001, 0x100a) # put this there as this is a slow step and used a lot
for index in range(0, len(dicoms)):
dicom_ = dicoms[index]
stack_position = 0
if stack_position_tag in dicom_:
stack_position = common.get_is_value(dicom_[stack_position_tag])
if previous_stack_position == stack_position:
# if the stack number is the same we move to the next timepoint
timepoint_index += 1
if len(grouped_dicoms) <= timepoint_index:
grouped_dicoms.append([])
else:
# if it changes move back to the first timepoint
timepoint_index = 0
grouped_dicoms[timepoint_index].append(dicom_)
previous_stack_position = stack_position
return grouped_dicoms | python | def _get_grouped_dicoms(dicom_input):
if [d for d in dicom_input if 'InstanceNumber' in d]:
dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)
else:
dicoms = common.sort_dicoms(dicom_input)
grouped_dicoms = [[]]
timepoint_index = 0
previous_stack_position = -1
stack_position_tag = Tag(0x2001, 0x100a)
for index in range(0, len(dicoms)):
dicom_ = dicoms[index]
stack_position = 0
if stack_position_tag in dicom_:
stack_position = common.get_is_value(dicom_[stack_position_tag])
if previous_stack_position == stack_position:
timepoint_index += 1
if len(grouped_dicoms) <= timepoint_index:
grouped_dicoms.append([])
else:
timepoint_index = 0
grouped_dicoms[timepoint_index].append(dicom_)
previous_stack_position = stack_position
return grouped_dicoms | [
"def",
"_get_grouped_dicoms",
"(",
"dicom_input",
")",
":",
"# if all dicoms have an instance number try sorting by instance number else by position",
"if",
"[",
"d",
"for",
"d",
"in",
"dicom_input",
"if",
"'InstanceNumber'",
"in",
"d",
"]",
":",
"dicoms",
"=",
"sorted",
"(",
"dicom_input",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"InstanceNumber",
")",
"else",
":",
"dicoms",
"=",
"common",
".",
"sort_dicoms",
"(",
"dicom_input",
")",
"# now group per stack",
"grouped_dicoms",
"=",
"[",
"[",
"]",
"]",
"# list with first element a list",
"timepoint_index",
"=",
"0",
"previous_stack_position",
"=",
"-",
"1",
"# loop over all sorted dicoms",
"stack_position_tag",
"=",
"Tag",
"(",
"0x2001",
",",
"0x100a",
")",
"# put this there as this is a slow step and used a lot",
"for",
"index",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"dicoms",
")",
")",
":",
"dicom_",
"=",
"dicoms",
"[",
"index",
"]",
"stack_position",
"=",
"0",
"if",
"stack_position_tag",
"in",
"dicom_",
":",
"stack_position",
"=",
"common",
".",
"get_is_value",
"(",
"dicom_",
"[",
"stack_position_tag",
"]",
")",
"if",
"previous_stack_position",
"==",
"stack_position",
":",
"# if the stack number is the same we move to the next timepoint",
"timepoint_index",
"+=",
"1",
"if",
"len",
"(",
"grouped_dicoms",
")",
"<=",
"timepoint_index",
":",
"grouped_dicoms",
".",
"append",
"(",
"[",
"]",
")",
"else",
":",
"# if it changes move back to the first timepoint",
"timepoint_index",
"=",
"0",
"grouped_dicoms",
"[",
"timepoint_index",
"]",
".",
"append",
"(",
"dicom_",
")",
"previous_stack_position",
"=",
"stack_position",
"return",
"grouped_dicoms"
]
| Search all dicoms in the dicom directory, sort and validate them | [
"Search",
"all",
"dicoms",
"in",
"the",
"dicom",
"directory",
"sort",
"and",
"validate",
"them"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L356-L390 |
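
The grouping rule above can be exercised on plain integers standing in for the Philips stack-position tag: a repeated position means the scan moved on to the next timepoint, a new position resets to the first timepoint. A runnable miniature:

def group_by_stack_position(stack_positions):
    grouped = [[]]
    timepoint_index = 0
    previous_position = None
    for index, position in enumerate(stack_positions):
        if position == previous_position:
            # same slice position again: advance to the next timepoint
            timepoint_index += 1
            if len(grouped) <= timepoint_index:
                grouped.append([])
        else:
            # a new slice position: back to the first timepoint
            timepoint_index = 0
        grouped[timepoint_index].append(index)
        previous_position = position
    return grouped

# two slices with three timepoints each, ordered slice-by-slice
print(group_by_stack_position([1, 1, 1, 2, 2, 2]))
# prints: [[0, 3], [1, 4], [2, 5]]
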
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _create_affine_multiframe | def _create_affine_multiframe(multiframe_dicom):
"""
Function to create the affine matrix for a philips multiframe dataset
"""
first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0]
last_frame = multiframe_dicom[Tag(0x5200, 0x9230)][-1]
# Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine)
image_orient1 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[0:3].astype(float)
image_orient2 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[3:6].astype(float)
normal = numpy.cross(image_orient1, image_orient2)
delta_r = float(first_frame[0x2005, 0x140f][0].PixelSpacing[0])
delta_c = float(first_frame[0x2005, 0x140f][0].PixelSpacing[1])
image_pos = numpy.array(first_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
last_image_pos = numpy.array(last_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
number_of_stack_slices = int(common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)]))
delta_s = abs(numpy.linalg.norm(last_image_pos - image_pos)) / (number_of_stack_slices - 1)
return numpy.array(
[[-image_orient1[0] * delta_c, -image_orient2[0] * delta_r, -delta_s * normal[0], -image_pos[0]],
[-image_orient1[1] * delta_c, -image_orient2[1] * delta_r, -delta_s * normal[1], -image_pos[1]],
[image_orient1[2] * delta_c, image_orient2[2] * delta_r, delta_s * normal[2], image_pos[2]],
[0, 0, 0, 1]]) | python | def _create_affine_multiframe(multiframe_dicom):
first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0]
last_frame = multiframe_dicom[Tag(0x5200, 0x9230)][-1]
image_orient1 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[0:3].astype(float)
image_orient2 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[3:6].astype(float)
normal = numpy.cross(image_orient1, image_orient2)
delta_r = float(first_frame[0x2005, 0x140f][0].PixelSpacing[0])
delta_c = float(first_frame[0x2005, 0x140f][0].PixelSpacing[1])
image_pos = numpy.array(first_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
last_image_pos = numpy.array(last_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
number_of_stack_slices = int(common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)]))
delta_s = abs(numpy.linalg.norm(last_image_pos - image_pos)) / (number_of_stack_slices - 1)
return numpy.array(
[[-image_orient1[0] * delta_c, -image_orient2[0] * delta_r, -delta_s * normal[0], -image_pos[0]],
[-image_orient1[1] * delta_c, -image_orient2[1] * delta_r, -delta_s * normal[1], -image_pos[1]],
[image_orient1[2] * delta_c, image_orient2[2] * delta_r, delta_s * normal[2], image_pos[2]],
[0, 0, 0, 1]]) | [
"def",
"_create_affine_multiframe",
"(",
"multiframe_dicom",
")",
":",
"first_frame",
"=",
"multiframe_dicom",
"[",
"Tag",
"(",
"0x5200",
",",
"0x9230",
")",
"]",
"[",
"0",
"]",
"last_frame",
"=",
"multiframe_dicom",
"[",
"Tag",
"(",
"0x5200",
",",
"0x9230",
")",
"]",
"[",
"-",
"1",
"]",
"# Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine)",
"image_orient1",
"=",
"numpy",
".",
"array",
"(",
"first_frame",
".",
"PlaneOrientationSequence",
"[",
"0",
"]",
".",
"ImageOrientationPatient",
")",
"[",
"0",
":",
"3",
"]",
".",
"astype",
"(",
"float",
")",
"image_orient2",
"=",
"numpy",
".",
"array",
"(",
"first_frame",
".",
"PlaneOrientationSequence",
"[",
"0",
"]",
".",
"ImageOrientationPatient",
")",
"[",
"3",
":",
"6",
"]",
".",
"astype",
"(",
"float",
")",
"normal",
"=",
"numpy",
".",
"cross",
"(",
"image_orient1",
",",
"image_orient2",
")",
"delta_r",
"=",
"float",
"(",
"first_frame",
"[",
"0x2005",
",",
"0x140f",
"]",
"[",
"0",
"]",
".",
"PixelSpacing",
"[",
"0",
"]",
")",
"delta_c",
"=",
"float",
"(",
"first_frame",
"[",
"0x2005",
",",
"0x140f",
"]",
"[",
"0",
"]",
".",
"PixelSpacing",
"[",
"1",
"]",
")",
"image_pos",
"=",
"numpy",
".",
"array",
"(",
"first_frame",
".",
"PlanePositionSequence",
"[",
"0",
"]",
".",
"ImagePositionPatient",
")",
".",
"astype",
"(",
"float",
")",
"last_image_pos",
"=",
"numpy",
".",
"array",
"(",
"last_frame",
".",
"PlanePositionSequence",
"[",
"0",
"]",
".",
"ImagePositionPatient",
")",
".",
"astype",
"(",
"float",
")",
"number_of_stack_slices",
"=",
"int",
"(",
"common",
".",
"get_ss_value",
"(",
"multiframe_dicom",
"[",
"Tag",
"(",
"0x2001",
",",
"0x105f",
")",
"]",
"[",
"0",
"]",
"[",
"Tag",
"(",
"0x2001",
",",
"0x102d",
")",
"]",
")",
")",
"delta_s",
"=",
"abs",
"(",
"numpy",
".",
"linalg",
".",
"norm",
"(",
"last_image_pos",
"-",
"image_pos",
")",
")",
"/",
"(",
"number_of_stack_slices",
"-",
"1",
")",
"return",
"numpy",
".",
"array",
"(",
"[",
"[",
"-",
"image_orient1",
"[",
"0",
"]",
"*",
"delta_c",
",",
"-",
"image_orient2",
"[",
"0",
"]",
"*",
"delta_r",
",",
"-",
"delta_s",
"*",
"normal",
"[",
"0",
"]",
",",
"-",
"image_pos",
"[",
"0",
"]",
"]",
",",
"[",
"-",
"image_orient1",
"[",
"1",
"]",
"*",
"delta_c",
",",
"-",
"image_orient2",
"[",
"1",
"]",
"*",
"delta_r",
",",
"-",
"delta_s",
"*",
"normal",
"[",
"1",
"]",
",",
"-",
"image_pos",
"[",
"1",
"]",
"]",
",",
"[",
"image_orient1",
"[",
"2",
"]",
"*",
"delta_c",
",",
"image_orient2",
"[",
"2",
"]",
"*",
"delta_r",
",",
"delta_s",
"*",
"normal",
"[",
"2",
"]",
",",
"image_pos",
"[",
"2",
"]",
"]",
",",
"[",
"0",
",",
"0",
",",
"0",
",",
"1",
"]",
"]",
")"
]
| Function to create the affine matrix for a philips multiframe dataset
This will work for philips dti and 4D if in multiframe format | [
"Function",
"to",
"create",
"the",
"affine",
"matrix",
"for",
"a",
"siemens",
"mosaic",
"dataset",
"This",
"will",
"work",
"for",
"siemens",
"dti",
"and",
"4D",
"if",
"in",
"mosaic",
"format"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L393-L419 |
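For reference, the affine built above follows the standard DICOM patient-space construction: row and column direction cosines scaled by the pixel spacing, the slice normal scaled by the slice increment, and the first two rows negated for the LPS-to-RAS flip expected by nifti. A self-contained numeric sketch with made-up axial values:

import numpy

# hypothetical axial acquisition: identity orientation, 1x1 mm pixels, 2 mm slices
row_cosine = numpy.array([1.0, 0.0, 0.0])     # ImageOrientationPatient[0:3]
col_cosine = numpy.array([0.0, 1.0, 0.0])     # ImageOrientationPatient[3:6]
normal = numpy.cross(row_cosine, col_cosine)
delta_r, delta_c, delta_s = 1.0, 1.0, 2.0
position = numpy.array([-100.0, -100.0, -50.0])  # ImagePositionPatient of the first slice

affine = numpy.array(
    [[-row_cosine[0] * delta_c, -col_cosine[0] * delta_r, -delta_s * normal[0], -position[0]],
     [-row_cosine[1] * delta_c, -col_cosine[1] * delta_r, -delta_s * normal[1], -position[1]],
     [row_cosine[2] * delta_c, col_cosine[2] * delta_r, delta_s * normal[2], position[2]],
     [0, 0, 0, 1]])
print(affine)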
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _multiframe_to_block | def _multiframe_to_block(multiframe_dicom):
"""
Generate a full datablock containing all stacks
"""
# Calculate the amount of stacks and slices in the stack
number_of_stack_slices = int(common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)]))
number_of_stacks = int(int(multiframe_dicom.NumberOfFrames) / number_of_stack_slices)
# We create a numpy array
size_x = multiframe_dicom.pixel_array.shape[2]
size_y = multiframe_dicom.pixel_array.shape[1]
size_z = number_of_stack_slices
size_t = number_of_stacks
# get the format
format_string = common.get_numpy_type(multiframe_dicom)
# get header info needed for ordering
frame_info = multiframe_dicom[0x5200, 0x9230]
data_4d = numpy.zeros((size_z, size_y, size_x, size_t), dtype=format_string)
# loop over each slice and insert in datablock
t_location_index = _get_t_position_index(multiframe_dicom)
for slice_index in range(0, size_t * size_z):
z_location = frame_info[slice_index].FrameContentSequence[0].InStackPositionNumber - 1
if t_location_index is None:
t_location = frame_info[slice_index].FrameContentSequence[0].TemporalPositionIndex - 1
else:
t_location = frame_info[slice_index].FrameContentSequence[0].DimensionIndexValues[t_location_index] - 1
block_data = multiframe_dicom.pixel_array[slice_index, :, :]
# apply scaling
rescale_intercept = frame_info[slice_index].PixelValueTransformationSequence[0].RescaleIntercept
rescale_slope = frame_info[slice_index].PixelValueTransformationSequence[0].RescaleSlope
block_data = common.do_scaling(block_data,
rescale_slope, rescale_intercept)
# switch to float if needed
if block_data.dtype != data_4d.dtype:
data_4d = data_4d.astype(block_data.dtype)
data_4d[z_location, :, :, t_location] = block_data
full_block = numpy.zeros((size_x, size_y, size_z, size_t), dtype=data_4d.dtype)
# loop over each stack and reorganize the data
for t_index in range(0, size_t):
# transpose the block so the directions are correct
data_3d = numpy.transpose(data_4d[:, :, :, t_index], (2, 1, 0))
# add the block to the full data
full_block[:, :, :, t_index] = data_3d
return full_block | python | def _multiframe_to_block(multiframe_dicom):
number_of_stack_slices = int(common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)]))
number_of_stacks = int(int(multiframe_dicom.NumberOfFrames) / number_of_stack_slices)
size_x = multiframe_dicom.pixel_array.shape[2]
size_y = multiframe_dicom.pixel_array.shape[1]
size_z = number_of_stack_slices
size_t = number_of_stacks
format_string = common.get_numpy_type(multiframe_dicom)
frame_info = multiframe_dicom[0x5200, 0x9230]
data_4d = numpy.zeros((size_z, size_y, size_x, size_t), dtype=format_string)
t_location_index = _get_t_position_index(multiframe_dicom)
for slice_index in range(0, size_t * size_z):
z_location = frame_info[slice_index].FrameContentSequence[0].InStackPositionNumber - 1
if t_location_index is None:
t_location = frame_info[slice_index].FrameContentSequence[0].TemporalPositionIndex - 1
else:
t_location = frame_info[slice_index].FrameContentSequence[0].DimensionIndexValues[t_location_index] - 1
block_data = multiframe_dicom.pixel_array[slice_index, :, :]
rescale_intercept = frame_info[slice_index].PixelValueTransformationSequence[0].RescaleIntercept
rescale_slope = frame_info[slice_index].PixelValueTransformationSequence[0].RescaleSlope
block_data = common.do_scaling(block_data,
rescale_slope, rescale_intercept)
if block_data.dtype != data_4d.dtype:
data_4d = data_4d.astype(block_data.dtype)
data_4d[z_location, :, :, t_location] = block_data
full_block = numpy.zeros((size_x, size_y, size_z, size_t), dtype=data_4d.dtype)
for t_index in range(0, size_t):
data_3d = numpy.transpose(data_4d[:, :, :, t_index], (2, 1, 0))
full_block[:, :, :, t_index] = data_3d
return full_block | [
"def",
"_multiframe_to_block",
"(",
"multiframe_dicom",
")",
":",
"# Calculate the amount of stacks and slices in the stack",
"number_of_stack_slices",
"=",
"int",
"(",
"common",
".",
"get_ss_value",
"(",
"multiframe_dicom",
"[",
"Tag",
"(",
"0x2001",
",",
"0x105f",
")",
"]",
"[",
"0",
"]",
"[",
"Tag",
"(",
"0x2001",
",",
"0x102d",
")",
"]",
")",
")",
"number_of_stacks",
"=",
"int",
"(",
"int",
"(",
"multiframe_dicom",
".",
"NumberOfFrames",
")",
"/",
"number_of_stack_slices",
")",
"# We create a numpy array",
"size_x",
"=",
"multiframe_dicom",
".",
"pixel_array",
".",
"shape",
"[",
"2",
"]",
"size_y",
"=",
"multiframe_dicom",
".",
"pixel_array",
".",
"shape",
"[",
"1",
"]",
"size_z",
"=",
"number_of_stack_slices",
"size_t",
"=",
"number_of_stacks",
"# get the format",
"format_string",
"=",
"common",
".",
"get_numpy_type",
"(",
"multiframe_dicom",
")",
"# get header info needed for ordering",
"frame_info",
"=",
"multiframe_dicom",
"[",
"0x5200",
",",
"0x9230",
"]",
"data_4d",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"size_z",
",",
"size_y",
",",
"size_x",
",",
"size_t",
")",
",",
"dtype",
"=",
"format_string",
")",
"# loop over each slice and insert in datablock",
"t_location_index",
"=",
"_get_t_position_index",
"(",
"multiframe_dicom",
")",
"for",
"slice_index",
"in",
"range",
"(",
"0",
",",
"size_t",
"*",
"size_z",
")",
":",
"z_location",
"=",
"frame_info",
"[",
"slice_index",
"]",
".",
"FrameContentSequence",
"[",
"0",
"]",
".",
"InStackPositionNumber",
"-",
"1",
"if",
"t_location_index",
"is",
"None",
":",
"t_location",
"=",
"frame_info",
"[",
"slice_index",
"]",
".",
"FrameContentSequence",
"[",
"0",
"]",
".",
"TemporalPositionIndex",
"-",
"1",
"else",
":",
"t_location",
"=",
"frame_info",
"[",
"slice_index",
"]",
".",
"FrameContentSequence",
"[",
"0",
"]",
".",
"DimensionIndexValues",
"[",
"t_location_index",
"]",
"-",
"1",
"block_data",
"=",
"multiframe_dicom",
".",
"pixel_array",
"[",
"slice_index",
",",
":",
",",
":",
"]",
"# apply scaling",
"rescale_intercept",
"=",
"frame_info",
"[",
"slice_index",
"]",
".",
"PixelValueTransformationSequence",
"[",
"0",
"]",
".",
"RescaleIntercept",
"rescale_slope",
"=",
"frame_info",
"[",
"slice_index",
"]",
".",
"PixelValueTransformationSequence",
"[",
"0",
"]",
".",
"RescaleSlope",
"block_data",
"=",
"common",
".",
"do_scaling",
"(",
"block_data",
",",
"rescale_slope",
",",
"rescale_intercept",
")",
"# switch to float if needed",
"if",
"block_data",
".",
"dtype",
"!=",
"data_4d",
".",
"dtype",
":",
"data_4d",
"=",
"data_4d",
".",
"astype",
"(",
"block_data",
".",
"dtype",
")",
"data_4d",
"[",
"z_location",
",",
":",
",",
":",
",",
"t_location",
"]",
"=",
"block_data",
"full_block",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"size_x",
",",
"size_y",
",",
"size_z",
",",
"size_t",
")",
",",
"dtype",
"=",
"data_4d",
".",
"dtype",
")",
"# loop over each stack and reorganize the data",
"for",
"t_index",
"in",
"range",
"(",
"0",
",",
"size_t",
")",
":",
"# transpose the block so the directions are correct",
"data_3d",
"=",
"numpy",
".",
"transpose",
"(",
"data_4d",
"[",
":",
",",
":",
",",
":",
",",
"t_index",
"]",
",",
"(",
"2",
",",
"1",
",",
"0",
")",
")",
"# add the block the the full data",
"full_block",
"[",
":",
",",
":",
",",
":",
",",
"t_index",
"]",
"=",
"data_3d",
"return",
"full_block"
]
| Generate a full datablock containing all stacks | [
"Generate",
"a",
"full",
"datablock",
"containing",
"all",
"stacks"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L422-L473 |
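The frame reordering above can be exercised without any DICOM data: fill a (z, y, x, t) buffer from flat frame indices, then transpose each timepoint to (x, y, z). A toy sketch; the modulo/integer-division indexing stands in for the real per-frame InStackPositionNumber and TemporalPositionIndex lookups:

import numpy

size_x, size_y, size_z, size_t = 4, 3, 2, 2
frames = numpy.arange(size_t * size_z * size_y * size_x).reshape(size_t * size_z, size_y, size_x)

data_4d = numpy.zeros((size_z, size_y, size_x, size_t), dtype=frames.dtype)
for slice_index in range(size_t * size_z):
    z_location = slice_index % size_z    # stand-in for InStackPositionNumber - 1
    t_location = slice_index // size_z   # stand-in for TemporalPositionIndex - 1
    data_4d[z_location, :, :, t_location] = frames[slice_index]

full_block = numpy.zeros((size_x, size_y, size_z, size_t), dtype=data_4d.dtype)
for t_index in range(size_t):
    # transpose each timepoint so the directions are correct
    full_block[:, :, :, t_index] = numpy.transpose(data_4d[:, :, :, t_index], (2, 1, 0))

print(full_block.shape)  # (4, 3, 2, 2)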
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _create_bvals_bvecs | def _create_bvals_bvecs(multiframe_dicom, bval_file, bvec_file, nifti, nifti_file):
"""
Write the bvals from the sorted dicom files to a bval file
Inspired by https://github.com/IBIC/ibicUtils/blob/master/ibicBvalsBvecs.py
"""
# create the empty arrays
number_of_stack_slices = common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
number_of_stacks = int(int(multiframe_dicom.NumberOfFrames) / number_of_stack_slices)
bvals = numpy.zeros([number_of_stacks], dtype=numpy.int32)
bvecs = numpy.zeros([number_of_stacks, 3])
# loop over all timepoints and create a list with all bvals and bvecs
for stack_index in range(0, number_of_stacks):
stack = multiframe_dicom[Tag(0x5200, 0x9230)][stack_index]
if str(stack[Tag(0x0018, 0x9117)][0][Tag(0x0018, 0x9075)].value) == 'DIRECTIONAL':
bvals[stack_index] = common.get_fd_value(stack[Tag(0x0018, 0x9117)][0][Tag(0x0018, 0x9087)])
bvecs[stack_index, :] = common.get_fd_array_value(stack[Tag(0x0018, 0x9117)][0]
[Tag(0x0018, 0x9076)][0][Tag(0x0018, 0x9089)], 3)
# truncate nifti if needed
nifti, bvals, bvecs = _fix_diffusion_images(bvals, bvecs, nifti, nifti_file)
# save the found bvecs to the file
if numpy.count_nonzero(bvals) > 0 or numpy.count_nonzero(bvecs) > 0:
common.write_bval_file(bvals, bval_file)
common.write_bvec_file(bvecs, bvec_file)
else:
bval_file = None
bvec_file = None
bvals = None
bvecs = None
return bvals, bvecs, bval_file, bvec_file | python | def _create_bvals_bvecs(multiframe_dicom, bval_file, bvec_file, nifti, nifti_file):
number_of_stack_slices = common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
number_of_stacks = int(int(multiframe_dicom.NumberOfFrames) / number_of_stack_slices)
bvals = numpy.zeros([number_of_stacks], dtype=numpy.int32)
bvecs = numpy.zeros([number_of_stacks, 3])
for stack_index in range(0, number_of_stacks):
stack = multiframe_dicom[Tag(0x5200, 0x9230)][stack_index]
if str(stack[Tag(0x0018, 0x9117)][0][Tag(0x0018, 0x9075)].value) == 'DIRECTIONAL':
bvals[stack_index] = common.get_fd_value(stack[Tag(0x0018, 0x9117)][0][Tag(0x0018, 0x9087)])
bvecs[stack_index, :] = common.get_fd_array_value(stack[Tag(0x0018, 0x9117)][0]
[Tag(0x0018, 0x9076)][0][Tag(0x0018, 0x9089)], 3)
nifti, bvals, bvecs = _fix_diffusion_images(bvals, bvecs, nifti, nifti_file)
if numpy.count_nonzero(bvals) > 0 or numpy.count_nonzero(bvecs) > 0:
common.write_bval_file(bvals, bval_file)
common.write_bvec_file(bvecs, bvec_file)
else:
bval_file = None
bvec_file = None
bvals = None
bvecs = None
return bvals, bvecs, bval_file, bvec_file | [
"def",
"_create_bvals_bvecs",
"(",
"multiframe_dicom",
",",
"bval_file",
",",
"bvec_file",
",",
"nifti",
",",
"nifti_file",
")",
":",
"# create the empty arrays",
"number_of_stack_slices",
"=",
"common",
".",
"get_ss_value",
"(",
"multiframe_dicom",
"[",
"Tag",
"(",
"0x2001",
",",
"0x105f",
")",
"]",
"[",
"0",
"]",
"[",
"Tag",
"(",
"0x2001",
",",
"0x102d",
")",
"]",
")",
"number_of_stacks",
"=",
"int",
"(",
"int",
"(",
"multiframe_dicom",
".",
"NumberOfFrames",
")",
"/",
"number_of_stack_slices",
")",
"bvals",
"=",
"numpy",
".",
"zeros",
"(",
"[",
"number_of_stacks",
"]",
",",
"dtype",
"=",
"numpy",
".",
"int32",
")",
"bvecs",
"=",
"numpy",
".",
"zeros",
"(",
"[",
"number_of_stacks",
",",
"3",
"]",
")",
"# loop over all timepoints and create a list with all bvals and bvecs",
"for",
"stack_index",
"in",
"range",
"(",
"0",
",",
"number_of_stacks",
")",
":",
"stack",
"=",
"multiframe_dicom",
"[",
"Tag",
"(",
"0x5200",
",",
"0x9230",
")",
"]",
"[",
"stack_index",
"]",
"if",
"str",
"(",
"stack",
"[",
"Tag",
"(",
"0x0018",
",",
"0x9117",
")",
"]",
"[",
"0",
"]",
"[",
"Tag",
"(",
"0x0018",
",",
"0x9075",
")",
"]",
".",
"value",
")",
"==",
"'DIRECTIONAL'",
":",
"bvals",
"[",
"stack_index",
"]",
"=",
"common",
".",
"get_fd_value",
"(",
"stack",
"[",
"Tag",
"(",
"0x0018",
",",
"0x9117",
")",
"]",
"[",
"0",
"]",
"[",
"Tag",
"(",
"0x0018",
",",
"0x9087",
")",
"]",
")",
"bvecs",
"[",
"stack_index",
",",
":",
"]",
"=",
"common",
".",
"get_fd_array_value",
"(",
"stack",
"[",
"Tag",
"(",
"0x0018",
",",
"0x9117",
")",
"]",
"[",
"0",
"]",
"[",
"Tag",
"(",
"0x0018",
",",
"0x9076",
")",
"]",
"[",
"0",
"]",
"[",
"Tag",
"(",
"0x0018",
",",
"0x9089",
")",
"]",
",",
"3",
")",
"# truncate nifti if needed",
"nifti",
",",
"bvals",
",",
"bvecs",
"=",
"_fix_diffusion_images",
"(",
"bvals",
",",
"bvecs",
",",
"nifti",
",",
"nifti_file",
")",
"# save the found bvecs to the file",
"if",
"numpy",
".",
"count_nonzero",
"(",
"bvals",
")",
">",
"0",
"or",
"numpy",
".",
"count_nonzero",
"(",
"bvecs",
")",
">",
"0",
":",
"common",
".",
"write_bval_file",
"(",
"bvals",
",",
"bval_file",
")",
"common",
".",
"write_bvec_file",
"(",
"bvecs",
",",
"bvec_file",
")",
"else",
":",
"bval_file",
"=",
"None",
"bvec_file",
"=",
"None",
"bvals",
"=",
"None",
"bvecs",
"=",
"None",
"return",
"bvals",
",",
"bvecs",
",",
"bval_file",
",",
"bvec_file"
]
| Write the bvals from the sorted dicom files to a bval file
Inspired by https://github.com/IBIC/ibicUtils/blob/master/ibicBvalsBvecs.py | [
"Write",
"the",
"bvals",
"from",
"the",
"sorted",
"dicom",
"files",
"to",
"a",
"bval",
"file",
"Inspired",
"by",
"https",
":",
"//",
"github",
".",
"com",
"/",
"IBIC",
"/",
"ibicUtils",
"/",
"blob",
"/",
"master",
"/",
"ibicBvalsBvecs",
".",
"py"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L511-L545 |
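The helpers common.write_bval_file and common.write_bvec_file are not shown here; presumably they emit the usual FSL layout (one line of bvals, then three lines of bvec components). A plausible standalone writer for that layout, illustrative only and not the library's implementation:

import numpy

def write_fsl_bvals_bvecs(bvals, bvecs, bval_file, bvec_file):
    """Write FSL-style bval/bvec text files: bvals on one line,
    bvecs as three lines (x, y, z components)."""
    with open(bval_file, 'w') as f:
        f.write(' '.join(str(int(b)) for b in bvals) + '\n')
    with open(bvec_file, 'w') as f:
        for component in numpy.asarray(bvecs).T:
            f.write(' '.join('%.6f' % v for v in component) + '\n')

bvals = [0, 1000, 1000]
bvecs = [[0, 0, 0], [1, 0, 0], [0, 1, 0]]
write_fsl_bvals_bvecs(bvals, bvecs, 'scan.bval', 'scan.bvec')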
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _fix_diffusion_images | def _fix_diffusion_images(bvals, bvecs, nifti, nifti_file):
"""
This function will remove the last timepoint from the nifti, bvals and bvecs if the last vector is 0,0,0
This is sometimes added at the end by philips
"""
# if all bvecs are zero or if the last bval is not zero there is nothing to trim
if numpy.count_nonzero(bvecs) == 0 or not numpy.count_nonzero(bvals[-1]) == 0:
# nothing needs to be done here
return nifti, bvals, bvecs
# remove last elements from bvals and bvecs
bvals = bvals[:-1]
bvecs = bvecs[:-1]
# remove last elements from the nifti
new_nifti = nibabel.Nifti1Image(nifti.get_data()[:, :, :, :-1], nifti.affine)
new_nifti.to_filename(nifti_file)
return new_nifti, bvals, bvecs | python | def _fix_diffusion_images(bvals, bvecs, nifti, nifti_file):
if numpy.count_nonzero(bvecs) == 0 or not numpy.count_nonzero(bvals[-1]) == 0:
return nifti, bvals, bvecs
bvals = bvals[:-1]
bvecs = bvecs[:-1]
new_nifti = nibabel.Nifti1Image(nifti.get_data()[:, :, :, :-1], nifti.affine)
new_nifti.to_filename(nifti_file)
return new_nifti, bvals, bvecs | [
"def",
"_fix_diffusion_images",
"(",
"bvals",
",",
"bvecs",
",",
"nifti",
",",
"nifti_file",
")",
":",
"# if all zero continue of if the last bvec is not all zero continue",
"if",
"numpy",
".",
"count_nonzero",
"(",
"bvecs",
")",
"==",
"0",
"or",
"not",
"numpy",
".",
"count_nonzero",
"(",
"bvals",
"[",
"-",
"1",
"]",
")",
"==",
"0",
":",
"# nothing needs to be done here",
"return",
"nifti",
",",
"bvals",
",",
"bvecs",
"# remove last elements from bvals and bvecs",
"bvals",
"=",
"bvals",
"[",
":",
"-",
"1",
"]",
"bvecs",
"=",
"bvecs",
"[",
":",
"-",
"1",
"]",
"# remove last elements from the nifti",
"new_nifti",
"=",
"nibabel",
".",
"Nifti1Image",
"(",
"nifti",
".",
"get_data",
"(",
")",
"[",
":",
",",
":",
",",
":",
",",
":",
"-",
"1",
"]",
",",
"nifti",
".",
"affine",
")",
"new_nifti",
".",
"to_filename",
"(",
"nifti_file",
")",
"return",
"new_nifti",
",",
"bvals",
",",
"bvecs"
]
| This function will remove the last timepoint from the nifti, bvals and bvecs if the last vector is 0,0,0
This is sometimes added at the end by philips | [
"This",
"function",
"will",
"remove",
"the",
"last",
"timepoint",
"from",
"the",
"nifti",
"bvals",
"and",
"bvecs",
"if",
"the",
"last",
"vector",
"is",
"0",
"0",
"0",
"This",
"is",
"sometimes",
"added",
"at",
"the",
"end",
"by",
"philips"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L548-L565 |
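Note that nifti.get_data() used above is deprecated in recent nibabel releases; get_fdata() is the current equivalent. A toy version of the same trim, dropping the trailing volume that would correspond to an appended 0,0,0 gradient:

import nibabel
import numpy

data = numpy.random.rand(4, 4, 3, 5)            # 5 volumes; assume the last one is the extra
img = nibabel.Nifti1Image(data, numpy.eye(4))
trimmed = nibabel.Nifti1Image(img.get_fdata()[:, :, :, :-1], img.affine)
print(trimmed.shape)  # (4, 4, 3, 4)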
icometrix/dicom2nifti | dicom2nifti/convert_philips.py | _create_singleframe_bvals_bvecs | def _create_singleframe_bvals_bvecs(grouped_dicoms, bval_file, bvec_file, nifti, nifti_file):
"""
Write the bvals from the sorted dicom files to a bval file
"""
# create the empty arrays
bvals = numpy.zeros([len(grouped_dicoms)], dtype=numpy.int32)
bvecs = numpy.zeros([len(grouped_dicoms), 3])
# loop over all timepoints and create a list with all bvals and bvecs
if _is_bval_type_a(grouped_dicoms):
bval_tag = Tag(0x2001, 0x1003)
bvec_x_tag = Tag(0x2005, 0x10b0)
bvec_y_tag = Tag(0x2005, 0x10b1)
bvec_z_tag = Tag(0x2005, 0x10b2)
for stack_index in range(0, len(grouped_dicoms)):
bvals[stack_index] = common.get_fl_value(grouped_dicoms[stack_index][0][bval_tag])
bvecs[stack_index, :] = [common.get_fl_value(grouped_dicoms[stack_index][0][bvec_x_tag]),
common.get_fl_value(grouped_dicoms[stack_index][0][bvec_y_tag]),
common.get_fl_value(grouped_dicoms[stack_index][0][bvec_z_tag])]
elif _is_bval_type_b(grouped_dicoms):
bval_tag = Tag(0x0018, 0x9087)
bvec_tag = Tag(0x0018, 0x9089)
for stack_index in range(0, len(grouped_dicoms)):
bvals[stack_index] = common.get_fd_value(grouped_dicoms[stack_index][0][bval_tag])
bvecs[stack_index, :] = common.get_fd_array_value(grouped_dicoms[stack_index][0][bvec_tag], 3)
# truncate nifti if needed
nifti, bvals, bvecs = _fix_diffusion_images(bvals, bvecs, nifti, nifti_file)
# save the found bvecs to the file
if numpy.count_nonzero(bvals) > 0 or numpy.count_nonzero(bvecs) > 0:
common.write_bval_file(bvals, bval_file)
common.write_bvec_file(bvecs, bvec_file)
else:
bval_file = None
bvec_file = None
bvals = None
bvecs = None
return nifti, bvals, bvecs, bval_file, bvec_file | python | def _create_singleframe_bvals_bvecs(grouped_dicoms, bval_file, bvec_file, nifti, nifti_file):
bvals = numpy.zeros([len(grouped_dicoms)], dtype=numpy.int32)
bvecs = numpy.zeros([len(grouped_dicoms), 3])
if _is_bval_type_a(grouped_dicoms):
bval_tag = Tag(0x2001, 0x1003)
bvec_x_tag = Tag(0x2005, 0x10b0)
bvec_y_tag = Tag(0x2005, 0x10b1)
bvec_z_tag = Tag(0x2005, 0x10b2)
for stack_index in range(0, len(grouped_dicoms)):
bvals[stack_index] = common.get_fl_value(grouped_dicoms[stack_index][0][bval_tag])
bvecs[stack_index, :] = [common.get_fl_value(grouped_dicoms[stack_index][0][bvec_x_tag]),
common.get_fl_value(grouped_dicoms[stack_index][0][bvec_y_tag]),
common.get_fl_value(grouped_dicoms[stack_index][0][bvec_z_tag])]
elif _is_bval_type_b(grouped_dicoms):
bval_tag = Tag(0x0018, 0x9087)
bvec_tag = Tag(0x0018, 0x9089)
for stack_index in range(0, len(grouped_dicoms)):
bvals[stack_index] = common.get_fd_value(grouped_dicoms[stack_index][0][bval_tag])
bvecs[stack_index, :] = common.get_fd_array_value(grouped_dicoms[stack_index][0][bvec_tag], 3)
nifti, bvals, bvecs = _fix_diffusion_images(bvals, bvecs, nifti, nifti_file)
if numpy.count_nonzero(bvals) > 0 or numpy.count_nonzero(bvecs) > 0:
common.write_bval_file(bvals, bval_file)
common.write_bvec_file(bvecs, bvec_file)
else:
bval_file = None
bvec_file = None
bvals = None
bvecs = None
return nifti, bvals, bvecs, bval_file, bvec_file | [
"def",
"_create_singleframe_bvals_bvecs",
"(",
"grouped_dicoms",
",",
"bval_file",
",",
"bvec_file",
",",
"nifti",
",",
"nifti_file",
")",
":",
"# create the empty arrays",
"bvals",
"=",
"numpy",
".",
"zeros",
"(",
"[",
"len",
"(",
"grouped_dicoms",
")",
"]",
",",
"dtype",
"=",
"numpy",
".",
"int32",
")",
"bvecs",
"=",
"numpy",
".",
"zeros",
"(",
"[",
"len",
"(",
"grouped_dicoms",
")",
",",
"3",
"]",
")",
"# loop over all timepoints and create a list with all bvals and bvecs",
"if",
"_is_bval_type_a",
"(",
"grouped_dicoms",
")",
":",
"bval_tag",
"=",
"Tag",
"(",
"0x2001",
",",
"0x1003",
")",
"bvec_x_tag",
"=",
"Tag",
"(",
"0x2005",
",",
"0x10b0",
")",
"bvec_y_tag",
"=",
"Tag",
"(",
"0x2005",
",",
"0x10b1",
")",
"bvec_z_tag",
"=",
"Tag",
"(",
"0x2005",
",",
"0x10b2",
")",
"for",
"stack_index",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"grouped_dicoms",
")",
")",
":",
"bvals",
"[",
"stack_index",
"]",
"=",
"common",
".",
"get_fl_value",
"(",
"grouped_dicoms",
"[",
"stack_index",
"]",
"[",
"0",
"]",
"[",
"bval_tag",
"]",
")",
"bvecs",
"[",
"stack_index",
",",
":",
"]",
"=",
"[",
"common",
".",
"get_fl_value",
"(",
"grouped_dicoms",
"[",
"stack_index",
"]",
"[",
"0",
"]",
"[",
"bvec_x_tag",
"]",
")",
",",
"common",
".",
"get_fl_value",
"(",
"grouped_dicoms",
"[",
"stack_index",
"]",
"[",
"0",
"]",
"[",
"bvec_y_tag",
"]",
")",
",",
"common",
".",
"get_fl_value",
"(",
"grouped_dicoms",
"[",
"stack_index",
"]",
"[",
"0",
"]",
"[",
"bvec_z_tag",
"]",
")",
"]",
"elif",
"_is_bval_type_b",
"(",
"grouped_dicoms",
")",
":",
"bval_tag",
"=",
"Tag",
"(",
"0x0018",
",",
"0x9087",
")",
"bvec_tag",
"=",
"Tag",
"(",
"0x0018",
",",
"0x9089",
")",
"for",
"stack_index",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"grouped_dicoms",
")",
")",
":",
"bvals",
"[",
"stack_index",
"]",
"=",
"common",
".",
"get_fd_value",
"(",
"grouped_dicoms",
"[",
"stack_index",
"]",
"[",
"0",
"]",
"[",
"bval_tag",
"]",
")",
"bvecs",
"[",
"stack_index",
",",
":",
"]",
"=",
"common",
".",
"get_fd_array_value",
"(",
"grouped_dicoms",
"[",
"stack_index",
"]",
"[",
"0",
"]",
"[",
"bvec_tag",
"]",
",",
"3",
")",
"# truncate nifti if needed",
"nifti",
",",
"bvals",
",",
"bvecs",
"=",
"_fix_diffusion_images",
"(",
"bvals",
",",
"bvecs",
",",
"nifti",
",",
"nifti_file",
")",
"# save the found bvecs to the file",
"if",
"numpy",
".",
"count_nonzero",
"(",
"bvals",
")",
">",
"0",
"or",
"numpy",
".",
"count_nonzero",
"(",
"bvecs",
")",
">",
"0",
":",
"common",
".",
"write_bval_file",
"(",
"bvals",
",",
"bval_file",
")",
"common",
".",
"write_bvec_file",
"(",
"bvecs",
",",
"bvec_file",
")",
"else",
":",
"bval_file",
"=",
"None",
"bvec_file",
"=",
"None",
"bvals",
"=",
"None",
"bvecs",
"=",
"None",
"return",
"nifti",
",",
"bvals",
",",
"bvecs",
",",
"bval_file",
",",
"bvec_file"
]
| Write the bvals from the sorted dicom files to a bval file | [
"Write",
"the",
"bvals",
"from",
"the",
"sorted",
"dicom",
"files",
"to",
"a",
"bval",
"file"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L568-L607 |
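The two header layouts handled above can be summarised in a small dispatch sketch. The real code reads the raw element bytes through common.get_fl_value / common.get_fd_array_value because these private elements may not be decoded by pydicom; the plain .value access below is a simplification for illustration:

from pydicom.tag import Tag

BVAL_A = Tag(0x2001, 0x1003)                                   # Philips private bval
BVEC_A = (Tag(0x2005, 0x10b0), Tag(0x2005, 0x10b1), Tag(0x2005, 0x10b2))
BVAL_B = Tag(0x0018, 0x9087)                                   # standard diffusion macro
BVEC_B = Tag(0x0018, 0x9089)

def read_bval_bvec(dataset):
    """Illustrative dispatch between the 'type a' and 'type b' layouts above."""
    if BVAL_A in dataset:
        return float(dataset[BVAL_A].value), [float(dataset[t].value) for t in BVEC_A]
    if BVAL_B in dataset:
        return float(dataset[BVAL_B].value), [float(v) for v in dataset[BVEC_B].value]
    return None, None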
icometrix/dicom2nifti | dicom2nifti/convert_generic.py | dicom_to_nifti | def dicom_to_nifti(dicom_input, output_file):
"""
This function will convert an anatomical dicom series to a nifti
Examples: See unit test
:param output_file: filepath to the output nifti
:param dicom_input: directory with the dicom files for a single scan, or list of read in dicoms
"""
if len(dicom_input) <= 0:
raise ConversionError('NO_DICOM_FILES_FOUND')
# remove duplicate slices based on position and data
dicom_input = _remove_duplicate_slices(dicom_input)
# remove localizers based on image type
dicom_input = _remove_localizers_by_imagetype(dicom_input)
if settings.validate_slicecount:
# remove_localizers based on image orientation (only valid if slicecount is validated)
dicom_input = _remove_localizers_by_orientation(dicom_input)
# validate all the dicom files for correct orientations
common.validate_slicecount(dicom_input)
if settings.validate_orientation:
# validate that all slices have the same orientation
common.validate_orientation(dicom_input)
if settings.validate_orthogonal:
# validate that we have an orthogonal image (to detect gantry tilting etc)
common.validate_orthogonal(dicom_input)
# sort the dicoms
dicom_input = common.sort_dicoms(dicom_input)
# validate slice increment inconsistent
slice_increment_inconsistent = False
if settings.validate_slice_increment:
# validate that all slices have a consistent slice increment
common.validate_slice_increment(dicom_input)
elif common.is_slice_increment_inconsistent(dicom_input):
slice_increment_inconsistent = True
# if inconsistent increment and we allow resampling then do the resampling based conversion to maintain the correct geometric shape
if slice_increment_inconsistent and settings.resample:
nii_image, max_slice_increment = _convert_slice_incement_inconsistencies(dicom_input)
# do the normal conversion
else:
# Get data; originally z,y,x, transposed to x,y,z
data = common.get_volume_pixeldata(dicom_input)
affine, max_slice_increment = common.create_affine(dicom_input)
# Convert to nifti
nii_image = nibabel.Nifti1Image(data, affine)
# Set TR and TE if available
if Tag(0x0018, 0x0080) in dicom_input[0] and Tag(0x0018, 0x0081) in dicom_input[0]:
common.set_tr_te(nii_image, float(dicom_input[0].RepetitionTime), float(dicom_input[0].EchoTime))
# Save to disk
if output_file is not None:
logger.info('Saving nifti to disk %s' % output_file)
nii_image.to_filename(output_file)
return {'NII_FILE': output_file,
'NII': nii_image,
'MAX_SLICE_INCREMENT': max_slice_increment} | python | def dicom_to_nifti(dicom_input, output_file):
if len(dicom_input) <= 0:
raise ConversionError('NO_DICOM_FILES_FOUND')
dicom_input = _remove_duplicate_slices(dicom_input)
dicom_input = _remove_localizers_by_imagetype(dicom_input)
if settings.validate_slicecount:
dicom_input = _remove_localizers_by_orientation(dicom_input)
common.validate_slicecount(dicom_input)
if settings.validate_orientation:
common.validate_orientation(dicom_input)
if settings.validate_orthogonal:
common.validate_orthogonal(dicom_input)
dicom_input = common.sort_dicoms(dicom_input)
slice_increment_inconsistent = False
if settings.validate_slice_increment:
common.validate_slice_increment(dicom_input)
elif common.is_slice_increment_inconsistent(dicom_input):
slice_increment_inconsistent = True
if slice_increment_inconsistent and settings.resample:
nii_image, max_slice_increment = _convert_slice_incement_inconsistencies(dicom_input)
else:
data = common.get_volume_pixeldata(dicom_input)
affine, max_slice_increment = common.create_affine(dicom_input)
nii_image = nibabel.Nifti1Image(data, affine)
if Tag(0x0018, 0x0080) in dicom_input[0] and Tag(0x0018, 0x0081) in dicom_input[0]:
common.set_tr_te(nii_image, float(dicom_input[0].RepetitionTime), float(dicom_input[0].EchoTime))
if output_file is not None:
logger.info('Saving nifti to disk %s' % output_file)
nii_image.to_filename(output_file)
return {'NII_FILE': output_file,
'NII': nii_image,
'MAX_SLICE_INCREMENT': max_slice_increment} | [
"def",
"dicom_to_nifti",
"(",
"dicom_input",
",",
"output_file",
")",
":",
"if",
"len",
"(",
"dicom_input",
")",
"<=",
"0",
":",
"raise",
"ConversionError",
"(",
"'NO_DICOM_FILES_FOUND'",
")",
"# remove duplicate slices based on position and data",
"dicom_input",
"=",
"_remove_duplicate_slices",
"(",
"dicom_input",
")",
"# remove localizers based on image type",
"dicom_input",
"=",
"_remove_localizers_by_imagetype",
"(",
"dicom_input",
")",
"if",
"settings",
".",
"validate_slicecount",
":",
"# remove_localizers based on image orientation (only valid if slicecount is validated)",
"dicom_input",
"=",
"_remove_localizers_by_orientation",
"(",
"dicom_input",
")",
"# validate all the dicom files for correct orientations",
"common",
".",
"validate_slicecount",
"(",
"dicom_input",
")",
"if",
"settings",
".",
"validate_orientation",
":",
"# validate that all slices have the same orientation",
"common",
".",
"validate_orientation",
"(",
"dicom_input",
")",
"if",
"settings",
".",
"validate_orthogonal",
":",
"# validate that we have an orthogonal image (to detect gantry tilting etc)",
"common",
".",
"validate_orthogonal",
"(",
"dicom_input",
")",
"# sort the dicoms",
"dicom_input",
"=",
"common",
".",
"sort_dicoms",
"(",
"dicom_input",
")",
"# validate slice increment inconsistent",
"slice_increment_inconsistent",
"=",
"False",
"if",
"settings",
".",
"validate_slice_increment",
":",
"# validate that all slices have a consistent slice increment",
"common",
".",
"validate_slice_increment",
"(",
"dicom_input",
")",
"elif",
"common",
".",
"is_slice_increment_inconsistent",
"(",
"dicom_input",
")",
":",
"slice_increment_inconsistent",
"=",
"True",
"# if inconsistent increment and we allow resampling then do the resampling based conversion to maintain the correct geometric shape",
"if",
"slice_increment_inconsistent",
"and",
"settings",
".",
"resample",
":",
"nii_image",
",",
"max_slice_increment",
"=",
"_convert_slice_incement_inconsistencies",
"(",
"dicom_input",
")",
"# do the normal conversion",
"else",
":",
"# Get data; originally z,y,x, transposed to x,y,z",
"data",
"=",
"common",
".",
"get_volume_pixeldata",
"(",
"dicom_input",
")",
"affine",
",",
"max_slice_increment",
"=",
"common",
".",
"create_affine",
"(",
"dicom_input",
")",
"# Convert to nifti",
"nii_image",
"=",
"nibabel",
".",
"Nifti1Image",
"(",
"data",
",",
"affine",
")",
"# Set TR and TE if available",
"if",
"Tag",
"(",
"0x0018",
",",
"0x0081",
")",
"in",
"dicom_input",
"[",
"0",
"]",
"and",
"Tag",
"(",
"0x0018",
",",
"0x0081",
")",
"in",
"dicom_input",
"[",
"0",
"]",
":",
"common",
".",
"set_tr_te",
"(",
"nii_image",
",",
"float",
"(",
"dicom_input",
"[",
"0",
"]",
".",
"RepetitionTime",
")",
",",
"float",
"(",
"dicom_input",
"[",
"0",
"]",
".",
"EchoTime",
")",
")",
"# Save to disk",
"if",
"output_file",
"is",
"not",
"None",
":",
"logger",
".",
"info",
"(",
"'Saving nifti to disk %s'",
"%",
"output_file",
")",
"nii_image",
".",
"to_filename",
"(",
"output_file",
")",
"return",
"{",
"'NII_FILE'",
":",
"output_file",
",",
"'NII'",
":",
"nii_image",
",",
"'MAX_SLICE_INCREMENT'",
":",
"max_slice_increment",
"}"
]
| This function will convert an anatomical dicom series to a nifti
Examples: See unit test
:param output_file: filepath to the output nifti
:param dicom_input: directory with the dicom files for a single scan, or list of read in dicoms | [
"This",
"function",
"will",
"convert",
"an",
"anatomical",
"dicom",
"series",
"to",
"a",
"nifti"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_generic.py#L29-L94 |
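A minimal usage sketch for dicom_to_nifti above; the file names are hypothetical and pydicom.dcmread is assumed (older pydicom releases expose it as read_file):

import pydicom
import dicom2nifti.convert_generic as convert_generic

paths = ['slice_%03d.dcm' % i for i in range(4)]          # hypothetical slice files
dicom_input = [pydicom.dcmread(path) for path in paths]
result = convert_generic.dicom_to_nifti(dicom_input, 'scan.nii.gz')
print(result['NII'].shape, result['MAX_SLICE_INCREMENT'])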
icometrix/dicom2nifti | dicom2nifti/convert_generic.py | _remove_duplicate_slices | def _remove_duplicate_slices(dicoms):
"""
Search dicoms for duplicate slices and delete them
"""
# Loop overall files and build dict
dicoms_dict = {}
filtered_dicoms = []
for dicom_ in dicoms:
if tuple(dicom_.ImagePositionPatient) not in dicoms_dict:
dicoms_dict[tuple(dicom_.ImagePositionPatient)] = dicom_
filtered_dicoms.append(dicom_)
else:
if numpy.array_equal(dicom_.pixel_array,
dicoms_dict[tuple(dicom_.ImagePositionPatient)].pixel_array):
logger.warning('Removing duplicate slice from series')
else:
filtered_dicoms.append(dicom_)
return filtered_dicoms | python | def _remove_duplicate_slices(dicoms):
dicoms_dict = {}
filtered_dicoms = []
for dicom_ in dicoms:
if tuple(dicom_.ImagePositionPatient) not in dicoms_dict:
dicoms_dict[tuple(dicom_.ImagePositionPatient)] = dicom_
filtered_dicoms.append(dicom_)
else:
if numpy.array_equal(dicom_.pixel_array,
dicoms_dict[tuple(dicom_.ImagePositionPatient)].pixel_array):
logger.warning('Removing duplicate slice from series')
else:
filtered_dicoms.append(dicom_)
return filtered_dicoms | [
"def",
"_remove_duplicate_slices",
"(",
"dicoms",
")",
":",
"# Loop overall files and build dict",
"dicoms_dict",
"=",
"{",
"}",
"filtered_dicoms",
"=",
"[",
"]",
"for",
"dicom_",
"in",
"dicoms",
":",
"if",
"tuple",
"(",
"dicom_",
".",
"ImagePositionPatient",
")",
"not",
"in",
"dicoms_dict",
":",
"dicoms_dict",
"[",
"tuple",
"(",
"dicom_",
".",
"ImagePositionPatient",
")",
"]",
"=",
"dicom_",
"filtered_dicoms",
".",
"append",
"(",
"dicom_",
")",
"else",
":",
"if",
"numpy",
".",
"array_equal",
"(",
"dicom_",
".",
"pixel_array",
",",
"dicoms_dict",
"[",
"tuple",
"(",
"dicom_",
".",
"ImagePositionPatient",
")",
"]",
".",
"pixel_array",
")",
":",
"logger",
".",
"warning",
"(",
"'Removing duplicate slice from series'",
")",
"else",
":",
"filtered_dicoms",
".",
"append",
"(",
"dicom_",
")",
"return",
"filtered_dicoms"
]
| Search dicoms for duplicate slices and delete them | [
"Search",
"dicoms",
"for",
"localizers",
"and",
"delete",
"them"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_generic.py#L97-L115 |
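The dedup pattern above keys on the position triple (made hashable with tuple()) and only drops a slice when both position and pixel data match. A stripped-down sketch with plain tuples standing in for headers:

seen = {}
kept = []
slices = [((0.0, 0.0, 0.0), 'a'), ((0.0, 0.0, 2.0), 'b'), ((0.0, 0.0, 0.0), 'a')]
for position, payload in slices:
    if position not in seen:
        seen[position] = payload
        kept.append(payload)
    elif seen[position] != payload:   # same position but different data -> keep it
        kept.append(payload)
print(kept)  # ['a', 'b'], the exact duplicate was dropped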
icometrix/dicom2nifti | dicom2nifti/convert_generic.py | _remove_localizers_by_imagetype | def _remove_localizers_by_imagetype(dicoms):
"""
Search dicoms for localizers and delete them
"""
# Loop overall files and build dict
filtered_dicoms = []
for dicom_ in dicoms:
if 'ImageType' in dicom_ and 'LOCALIZER' in dicom_.ImageType:
continue
# 'Projection Image' are Localizers for CT only see MSMET-234
if 'CT' in dicom_.Modality and 'ImageType' in dicom_ and 'PROJECTION IMAGE' in dicom_.ImageType:
continue
filtered_dicoms.append(dicom_)
return filtered_dicoms | python | def _remove_localizers_by_imagetype(dicoms):
filtered_dicoms = []
for dicom_ in dicoms:
if 'ImageType' in dicom_ and 'LOCALIZER' in dicom_.ImageType:
continue
if 'CT' in dicom_.Modality and 'ImageType' in dicom_ and 'PROJECTION IMAGE' in dicom_.ImageType:
continue
filtered_dicoms.append(dicom_)
return filtered_dicoms | [
"def",
"_remove_localizers_by_imagetype",
"(",
"dicoms",
")",
":",
"# Loop overall files and build dict",
"filtered_dicoms",
"=",
"[",
"]",
"for",
"dicom_",
"in",
"dicoms",
":",
"if",
"'ImageType'",
"in",
"dicom_",
"and",
"'LOCALIZER'",
"in",
"dicom_",
".",
"ImageType",
":",
"continue",
"# 'Projection Image' are Localizers for CT only see MSMET-234",
"if",
"'CT'",
"in",
"dicom_",
".",
"Modality",
"and",
"'ImageType'",
"in",
"dicom_",
"and",
"'PROJECTION IMAGE'",
"in",
"dicom_",
".",
"ImageType",
":",
"continue",
"filtered_dicoms",
".",
"append",
"(",
"dicom_",
")",
"return",
"filtered_dicoms"
]
| Search dicoms for localizers and delete them | [
"Search",
"dicoms",
"for",
"localizers",
"and",
"delete",
"them"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_generic.py#L118-L131 |
icometrix/dicom2nifti | dicom2nifti/convert_generic.py | _remove_localizers_by_orientation | def _remove_localizers_by_orientation(dicoms):
"""
Removing localizers based on the orientation.
This is needed as in some cases with ct data there are some localizer/projection type images that cannot
be distinguished by the dicom headers. This is why we kick out all orientations that have fewer than 4 files
4 is the limit anyway for converting to nifti in our case
"""
orientations = []
sorted_dicoms = {}
# Loop overall files and build dict
for dicom_header in dicoms:
# Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine)
image_orient1 = numpy.array(dicom_header.ImageOrientationPatient)[0:3]
image_orient2 = numpy.array(dicom_header.ImageOrientationPatient)[3:6]
image_orient_combined = (image_orient1.tolist(), image_orient2.tolist())
found_orientation = False
for orientation in orientations:
if numpy.allclose(image_orient_combined[0], numpy.array(orientation[0]), rtol=0.001, atol=0.001) \
and numpy.allclose(image_orient_combined[1], numpy.array(orientation[1]), rtol=0.001,
atol=0.001):
sorted_dicoms[str(orientation)].append(dicom_header)
found_orientation = True
break
if not found_orientation:
orientations.append(image_orient_combined)
sorted_dicoms[str(image_orient_combined)] = [dicom_header]
# if there are multiple possible orientations delete orientations where there are less than 4 files
# we don't convert anything less that that anyway
if len(sorted_dicoms) > 1:
filtered_dicoms = []
for orientation in sorted_dicoms.keys():
if len(sorted_dicoms[orientation]) >= 4:
filtered_dicoms.extend(sorted_dicoms[orientation])
return filtered_dicoms
else:
return six.next(six.itervalues(sorted_dicoms)) | python | def _remove_localizers_by_orientation(dicoms):
orientations = []
sorted_dicoms = {}
for dicom_header in dicoms:
image_orient1 = numpy.array(dicom_header.ImageOrientationPatient)[0:3]
image_orient2 = numpy.array(dicom_header.ImageOrientationPatient)[3:6]
image_orient_combined = (image_orient1.tolist(), image_orient2.tolist())
found_orientation = False
for orientation in orientations:
if numpy.allclose(image_orient_combined[0], numpy.array(orientation[0]), rtol=0.001, atol=0.001) \
and numpy.allclose(image_orient_combined[1], numpy.array(orientation[1]), rtol=0.001,
atol=0.001):
sorted_dicoms[str(orientation)].append(dicom_header)
found_orientation = True
break
if not found_orientation:
orientations.append(image_orient_combined)
sorted_dicoms[str(image_orient_combined)] = [dicom_header]
if len(sorted_dicoms) > 1:
filtered_dicoms = []
for orientation in sorted_dicoms.keys():
if len(sorted_dicoms[orientation]) >= 4:
filtered_dicoms.extend(sorted_dicoms[orientation])
return filtered_dicoms
else:
return six.next(six.itervalues(sorted_dicoms)) | [
"def",
"_remove_localizers_by_orientation",
"(",
"dicoms",
")",
":",
"orientations",
"=",
"[",
"]",
"sorted_dicoms",
"=",
"{",
"}",
"# Loop overall files and build dict",
"for",
"dicom_header",
"in",
"dicoms",
":",
"# Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine)",
"image_orient1",
"=",
"numpy",
".",
"array",
"(",
"dicom_header",
".",
"ImageOrientationPatient",
")",
"[",
"0",
":",
"3",
"]",
"image_orient2",
"=",
"numpy",
".",
"array",
"(",
"dicom_header",
".",
"ImageOrientationPatient",
")",
"[",
"3",
":",
"6",
"]",
"image_orient_combined",
"=",
"(",
"image_orient1",
".",
"tolist",
"(",
")",
",",
"image_orient2",
".",
"tolist",
"(",
")",
")",
"found_orientation",
"=",
"False",
"for",
"orientation",
"in",
"orientations",
":",
"if",
"numpy",
".",
"allclose",
"(",
"image_orient_combined",
"[",
"0",
"]",
",",
"numpy",
".",
"array",
"(",
"orientation",
"[",
"0",
"]",
")",
",",
"rtol",
"=",
"0.001",
",",
"atol",
"=",
"0.001",
")",
"and",
"numpy",
".",
"allclose",
"(",
"image_orient_combined",
"[",
"1",
"]",
",",
"numpy",
".",
"array",
"(",
"orientation",
"[",
"1",
"]",
")",
",",
"rtol",
"=",
"0.001",
",",
"atol",
"=",
"0.001",
")",
":",
"sorted_dicoms",
"[",
"str",
"(",
"orientation",
")",
"]",
".",
"append",
"(",
"dicom_header",
")",
"found_orientation",
"=",
"True",
"break",
"if",
"not",
"found_orientation",
":",
"orientations",
".",
"append",
"(",
"image_orient_combined",
")",
"sorted_dicoms",
"[",
"str",
"(",
"image_orient_combined",
")",
"]",
"=",
"[",
"dicom_header",
"]",
"# if there are multiple possible orientations delete orientations where there are less than 4 files",
"# we don't convert anything less that that anyway",
"if",
"len",
"(",
"sorted_dicoms",
")",
">",
"1",
":",
"filtered_dicoms",
"=",
"[",
"]",
"for",
"orientation",
"in",
"sorted_dicoms",
".",
"keys",
"(",
")",
":",
"if",
"len",
"(",
"sorted_dicoms",
"[",
"orientation",
"]",
")",
">=",
"4",
":",
"filtered_dicoms",
".",
"extend",
"(",
"sorted_dicoms",
"[",
"orientation",
"]",
")",
"return",
"filtered_dicoms",
"else",
":",
"return",
"six",
".",
"next",
"(",
"six",
".",
"itervalues",
"(",
"sorted_dicoms",
")",
")"
]
| Removing localizers based on the orientation.
This is needed as in some cases with ct data there are some localizer/projection type images that cannot
be distinguished by the dicom headers. This is why we kick out all orientations that have fewer than 4 files
4 is the limit anyway for converting to nifti in our case | [
"Removing",
"localizers",
"based",
"on",
"the",
"orientation",
".",
"This",
"is",
"needed",
"as",
"in",
"some",
"cases",
"with",
"ct",
"data",
"there",
"are",
"some",
"localizer",
"/",
"projection",
"type",
"images",
"that",
"cannot",
"be",
"distiguished",
"by",
"the",
"dicom",
"headers",
".",
"This",
"is",
"why",
"we",
"kick",
"out",
"all",
"orientations",
"that",
"do",
"not",
"have",
"more",
"than",
"4",
"files",
"4",
"is",
"the",
"limit",
"anyway",
"for",
"converting",
"to",
"nifti",
"on",
"our",
"case"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_generic.py#L134-L171 |
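The orientation bucketing above is a tolerance-based grouping with numpy.allclose over the (row, column) cosine pair. A self-contained sketch of that comparison:

import numpy

def group_orientations(orientation_pairs, tolerance=1e-3):
    """Group (row, col) orientation pairs that match within tolerance,
    mirroring the allclose comparison above."""
    groups = []
    for pair in orientation_pairs:
        for group in groups:
            if (numpy.allclose(pair[0], group[0][0], rtol=tolerance, atol=tolerance)
                    and numpy.allclose(pair[1], group[0][1], rtol=tolerance, atol=tolerance)):
                group.append(pair)
                break
        else:
            groups.append([pair])
    return groups

axial = (numpy.array([1.0, 0.0, 0.0]), numpy.array([0.0, 1.0, 0.0]))
axial_jittered = (numpy.array([1.0, 0.0001, 0.0]), numpy.array([0.0, 1.0, 0.0]))
coronal = (numpy.array([1.0, 0.0, 0.0]), numpy.array([0.0, 0.0, -1.0]))
print([len(g) for g in group_orientations([axial, axial_jittered, coronal])])  # [2, 1]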
icometrix/dicom2nifti | dicom2nifti/convert_generic.py | _convert_slice_incement_inconsistencies | def _convert_slice_incement_inconsistencies(dicom_input):
"""
If a slice increment inconsistency is detected (for the moment only for CT images), split the volumes into subvolumes based on the slice increment and process each volume separately, using a space constructed from the highest resolution increment
"""
# Estimate the "first" slice increment based on the 2 first slices
increment = numpy.array(dicom_input[0].ImagePositionPatient) - numpy.array(dicom_input[1].ImagePositionPatient)
# Create as many volumes as many changes in slice increment. NB Increments might be repeated in different volumes
max_slice_increment = 0
slice_incement_groups = []
current_group = [dicom_input[0], dicom_input[1]]
previous_image_position = numpy.array(dicom_input[1].ImagePositionPatient)
for dicom in dicom_input[2:]:
current_image_position = numpy.array(dicom.ImagePositionPatient)
current_increment = previous_image_position - current_image_position
max_slice_increment = max(max_slice_increment, numpy.linalg.norm(current_increment))
if numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):
current_group.append(dicom)
if not numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):
slice_incement_groups.append(current_group)
current_group = [current_group[-1], dicom]
increment = current_increment
previous_image_position = current_image_position
slice_incement_groups.append(current_group)
# Create nibabel objects for each volume based on the corresponding headers
slice_incement_niftis = []
for dicom_slices in slice_incement_groups:
data = common.get_volume_pixeldata(dicom_slices)
affine, _ = common.create_affine(dicom_slices)
slice_incement_niftis.append(nibabel.Nifti1Image(data, affine))
nifti_volume = resample.resample_nifti_images(slice_incement_niftis)
return nifti_volume, max_slice_increment | python | def _convert_slice_incement_inconsistencies(dicom_input):
increment = numpy.array(dicom_input[0].ImagePositionPatient) - numpy.array(dicom_input[1].ImagePositionPatient)
max_slice_increment = 0
slice_incement_groups = []
current_group = [dicom_input[0], dicom_input[1]]
previous_image_position = numpy.array(dicom_input[1].ImagePositionPatient)
for dicom in dicom_input[2:]:
current_image_position = numpy.array(dicom.ImagePositionPatient)
current_increment = previous_image_position - current_image_position
max_slice_increment = max(max_slice_increment, numpy.linalg.norm(current_increment))
if numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):
current_group.append(dicom)
if not numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):
slice_incement_groups.append(current_group)
current_group = [current_group[-1], dicom]
increment = current_increment
previous_image_position = current_image_position
slice_incement_groups.append(current_group)
slice_incement_niftis = []
for dicom_slices in slice_incement_groups:
data = common.get_volume_pixeldata(dicom_slices)
affine, _ = common.create_affine(dicom_slices)
slice_incement_niftis.append(nibabel.Nifti1Image(data, affine))
nifti_volume = resample.resample_nifti_images(slice_incement_niftis)
return nifti_volume, max_slice_increment | [
"def",
"_convert_slice_incement_inconsistencies",
"(",
"dicom_input",
")",
":",
"# Estimate the \"first\" slice increment based on the 2 first slices",
"increment",
"=",
"numpy",
".",
"array",
"(",
"dicom_input",
"[",
"0",
"]",
".",
"ImagePositionPatient",
")",
"-",
"numpy",
".",
"array",
"(",
"dicom_input",
"[",
"1",
"]",
".",
"ImagePositionPatient",
")",
"# Create as many volumes as many changes in slice increment. NB Increments might be repeated in different volumes",
"max_slice_increment",
"=",
"0",
"slice_incement_groups",
"=",
"[",
"]",
"current_group",
"=",
"[",
"dicom_input",
"[",
"0",
"]",
",",
"dicom_input",
"[",
"1",
"]",
"]",
"previous_image_position",
"=",
"numpy",
".",
"array",
"(",
"dicom_input",
"[",
"1",
"]",
".",
"ImagePositionPatient",
")",
"for",
"dicom",
"in",
"dicom_input",
"[",
"2",
":",
"]",
":",
"current_image_position",
"=",
"numpy",
".",
"array",
"(",
"dicom",
".",
"ImagePositionPatient",
")",
"current_increment",
"=",
"previous_image_position",
"-",
"current_image_position",
"max_slice_increment",
"=",
"max",
"(",
"max_slice_increment",
",",
"numpy",
".",
"linalg",
".",
"norm",
"(",
"current_increment",
")",
")",
"if",
"numpy",
".",
"allclose",
"(",
"increment",
",",
"current_increment",
",",
"rtol",
"=",
"0.05",
",",
"atol",
"=",
"0.1",
")",
":",
"current_group",
".",
"append",
"(",
"dicom",
")",
"if",
"not",
"numpy",
".",
"allclose",
"(",
"increment",
",",
"current_increment",
",",
"rtol",
"=",
"0.05",
",",
"atol",
"=",
"0.1",
")",
":",
"slice_incement_groups",
".",
"append",
"(",
"current_group",
")",
"current_group",
"=",
"[",
"current_group",
"[",
"-",
"1",
"]",
",",
"dicom",
"]",
"increment",
"=",
"current_increment",
"previous_image_position",
"=",
"current_image_position",
"slice_incement_groups",
".",
"append",
"(",
"current_group",
")",
"# Create nibabel objects for each volume based on the corresponding headers",
"slice_incement_niftis",
"=",
"[",
"]",
"for",
"dicom_slices",
"in",
"slice_incement_groups",
":",
"data",
"=",
"common",
".",
"get_volume_pixeldata",
"(",
"dicom_slices",
")",
"affine",
",",
"_",
"=",
"common",
".",
"create_affine",
"(",
"dicom_slices",
")",
"slice_incement_niftis",
".",
"append",
"(",
"nibabel",
".",
"Nifti1Image",
"(",
"data",
",",
"affine",
")",
")",
"nifti_volume",
"=",
"resample",
".",
"resample_nifti_images",
"(",
"slice_incement_niftis",
")",
"return",
"nifti_volume",
",",
"max_slice_increment"
]
| If a slice increment inconsistency is detected (for the moment only for CT images), split the volumes into subvolumes based on the slice increment and process each volume separately, using a space constructed from the highest resolution increment | [
"If",
"there",
"is",
"slice",
"increment",
"inconsistency",
"detected",
"for",
"the",
"moment",
"CT",
"images",
"then",
"split",
"the",
"volumes",
"into",
"subvolumes",
"based",
"on",
"the",
"slice",
"increment",
"and",
"process",
"each",
"volume",
"separately",
"using",
"a",
"space",
"constructed",
"based",
"on",
"the",
"highest",
"resolution",
"increment"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_generic.py#L174-L209 |
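The splitting logic above can be exercised on bare z-positions. A toy trace of the same loop; note each new group re-uses the last slice of the previous one, exactly as current_group = [current_group[-1], dicom] does above, before the per-group niftis are fused by resample.resample_nifti_images:

import numpy

positions = [0.0, 2.0, 4.0, 8.0, 10.0, 12.0]   # a 2 mm series with one 4 mm jump
increment = positions[0] - positions[1]
groups = [[positions[0], positions[1]]]
previous = positions[1]
for position in positions[2:]:
    current_increment = previous - position
    if numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):
        groups[-1].append(position)
    else:
        groups.append([groups[-1][-1], position])  # overlap by one slice
        increment = current_increment
    previous = position
print(groups)  # [[0.0, 2.0, 4.0], [4.0, 8.0], [8.0, 10.0, 12.0]]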
icometrix/dicom2nifti | dicom2nifti/common.py | read_dicom_directory | def read_dicom_directory(dicom_directory, stop_before_pixels=False):
"""
Read all dicom files in a given directory (stop before pixels)
:type stop_before_pixels: bool
:type dicom_directory: six.string_types
:param stop_before_pixels: Should we stop reading before the pixeldata (handy if we only want header info)
:param dicom_directory: Directory with dicom data
:return: List of dicom objects
"""
dicom_input = []
for root, _, files in os.walk(dicom_directory):
for dicom_file in files:
file_path = os.path.join(root, dicom_file)
if compressed_dicom.is_dicom_file(file_path):
dicom_headers = compressed_dicom.read_file(file_path,
defer_size="1 KB",
stop_before_pixels=stop_before_pixels,
force=dicom2nifti.settings.pydicom_read_force)
if is_valid_imaging_dicom(dicom_headers):
dicom_input.append(dicom_headers)
return dicom_input | python | def read_dicom_directory(dicom_directory, stop_before_pixels=False):
dicom_input = []
for root, _, files in os.walk(dicom_directory):
for dicom_file in files:
file_path = os.path.join(root, dicom_file)
if compressed_dicom.is_dicom_file(file_path):
dicom_headers = compressed_dicom.read_file(file_path,
defer_size="1 KB",
stop_before_pixels=stop_before_pixels,
force=dicom2nifti.settings.pydicom_read_force)
if is_valid_imaging_dicom(dicom_headers):
dicom_input.append(dicom_headers)
return dicom_input | [
"def",
"read_dicom_directory",
"(",
"dicom_directory",
",",
"stop_before_pixels",
"=",
"False",
")",
":",
"dicom_input",
"=",
"[",
"]",
"for",
"root",
",",
"_",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"dicom_directory",
")",
":",
"for",
"dicom_file",
"in",
"files",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"dicom_file",
")",
"if",
"compressed_dicom",
".",
"is_dicom_file",
"(",
"file_path",
")",
":",
"dicom_headers",
"=",
"compressed_dicom",
".",
"read_file",
"(",
"file_path",
",",
"defer_size",
"=",
"\"1 KB\"",
",",
"stop_before_pixels",
"=",
"stop_before_pixels",
",",
"force",
"=",
"dicom2nifti",
".",
"settings",
".",
"pydicom_read_force",
")",
"if",
"is_valid_imaging_dicom",
"(",
"dicom_headers",
")",
":",
"dicom_input",
".",
"append",
"(",
"dicom_headers",
")",
"return",
"dicom_input"
]
| Read all dicom files in a given directory (stop before pixels)
:type stop_before_pixels: bool
:type dicom_directory: six.string_types
:param stop_before_pixels: Should we stop reading before the pixeldata (handy if we only want header info)
:param dicom_directory: Directory with dicom data
:return: List of dicom objects | [
"Read",
"all",
"dicom",
"files",
"in",
"a",
"given",
"directory",
"(",
"stop",
"before",
"pixels",
")"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L33-L54 |
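A short usage sketch (the directory path is hypothetical); reading headers only is enough for sorting and validation and avoids decoding pixel data:

import dicom2nifti.common as common

headers = common.read_dicom_directory('/data/my_scan', stop_before_pixels=True)
print(len(headers))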
icometrix/dicom2nifti | dicom2nifti/common.py | is_hitachi | def is_hitachi(dicom_input):
"""
Use this function to detect if a dicom series is a hitachi dataset
:param dicom_input: directory with dicom files for 1 scan of a dicom_header
"""
# read dicom header
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
# check if manufacturer is hitachi
if 'HITACHI' not in header.Manufacturer.upper():
return False
return True | python | def is_hitachi(dicom_input):
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False
if header.Modality.upper() != 'MR':
return False
if 'HITACHI' not in header.Manufacturer.upper():
return False
return True | [
"def",
"is_hitachi",
"(",
"dicom_input",
")",
":",
"# read dicom header",
"header",
"=",
"dicom_input",
"[",
"0",
"]",
"if",
"'Manufacturer'",
"not",
"in",
"header",
"or",
"'Modality'",
"not",
"in",
"header",
":",
"return",
"False",
"# we try generic conversion in these cases",
"# check if Modality is mr",
"if",
"header",
".",
"Modality",
".",
"upper",
"(",
")",
"!=",
"'MR'",
":",
"return",
"False",
"# check if manufacturer is hitachi",
"if",
"'HITACHI'",
"not",
"in",
"header",
".",
"Manufacturer",
".",
"upper",
"(",
")",
":",
"return",
"False",
"return",
"True"
]
| Use this function to detect if a dicom series is a hitachi dataset
:param dicom_input: directory with dicom files for 1 scan of a dicom_header | [
"Use",
"this",
"function",
"to",
"detect",
"if",
"a",
"dicom",
"series",
"is",
"a",
"hitachi",
"dataset"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L57-L77 |
icometrix/dicom2nifti | dicom2nifti/common.py | is_ge | def is_ge(dicom_input):
"""
Use this function to detect if a dicom series is a GE dataset
:param dicom_input: list with dicom objects
"""
# read dicom header
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
# check if manufacturer is GE
if 'GE MEDICAL SYSTEMS' not in header.Manufacturer.upper():
return False
return True | python | def is_ge(dicom_input):
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False
if header.Modality.upper() != 'MR':
return False
if 'GE MEDICAL SYSTEMS' not in header.Manufacturer.upper():
return False
return True | [
"def",
"is_ge",
"(",
"dicom_input",
")",
":",
"# read dicom header",
"header",
"=",
"dicom_input",
"[",
"0",
"]",
"if",
"'Manufacturer'",
"not",
"in",
"header",
"or",
"'Modality'",
"not",
"in",
"header",
":",
"return",
"False",
"# we try generic conversion in these cases",
"# check if Modality is mr",
"if",
"header",
".",
"Modality",
".",
"upper",
"(",
")",
"!=",
"'MR'",
":",
"return",
"False",
"# check if manufacturer is GE",
"if",
"'GE MEDICAL SYSTEMS'",
"not",
"in",
"header",
".",
"Manufacturer",
".",
"upper",
"(",
")",
":",
"return",
"False",
"return",
"True"
]
| Use this function to detect if a dicom series is a GE dataset
:param dicom_input: list with dicom objects | [
"Use",
"this",
"function",
"to",
"detect",
"if",
"a",
"dicom",
"series",
"is",
"a",
"GE",
"dataset"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L80-L100 |
icometrix/dicom2nifti | dicom2nifti/common.py | is_philips | def is_philips(dicom_input):
"""
Use this function to detect if a dicom series is a philips dataset
:param dicom_input: directory with dicom files for 1 scan of a dicom_header
"""
# read dicom header
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
# check if manufacturer is Philips
if 'PHILIPS' not in header.Manufacturer.upper():
return False
return True | python | def is_philips(dicom_input):
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False
if header.Modality.upper() != 'MR':
return False
if 'PHILIPS' not in header.Manufacturer.upper():
return False
return True | [
"def",
"is_philips",
"(",
"dicom_input",
")",
":",
"# read dicom header",
"header",
"=",
"dicom_input",
"[",
"0",
"]",
"if",
"'Manufacturer'",
"not",
"in",
"header",
"or",
"'Modality'",
"not",
"in",
"header",
":",
"return",
"False",
"# we try generic conversion in these cases",
"# check if Modality is mr",
"if",
"header",
".",
"Modality",
".",
"upper",
"(",
")",
"!=",
"'MR'",
":",
"return",
"False",
"# check if manufacturer is Philips",
"if",
"'PHILIPS'",
"not",
"in",
"header",
".",
"Manufacturer",
".",
"upper",
"(",
")",
":",
"return",
"False",
"return",
"True"
]
| Use this function to detect if a dicom series is a philips dataset
:param dicom_input: directory with dicom files for 1 scan | [
"Use",
"this",
"function",
"to",
"detect",
"if",
"a",
"dicom",
"series",
"is",
"a",
"philips",
"dataset"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L103-L123 |
icometrix/dicom2nifti | dicom2nifti/common.py | is_siemens | def is_siemens(dicom_input):
"""
Use this function to detect if a dicom series is a siemens dataset
:param dicom_input: directory with dicom files for 1 scan
"""
# read dicom header
header = dicom_input[0]
# check if manufacturer is Siemens
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
if 'SIEMENS' not in header.Manufacturer.upper():
return False
return True | python | def is_siemens(dicom_input):
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False
if header.Modality.upper() != 'MR':
return False
if 'SIEMENS' not in header.Manufacturer.upper():
return False
return True | [
"def",
"is_siemens",
"(",
"dicom_input",
")",
":",
"# read dicom header",
"header",
"=",
"dicom_input",
"[",
"0",
"]",
"# check if manufacturer is Siemens",
"if",
"'Manufacturer'",
"not",
"in",
"header",
"or",
"'Modality'",
"not",
"in",
"header",
":",
"return",
"False",
"# we try generic conversion in these cases",
"# check if Modality is mr",
"if",
"header",
".",
"Modality",
".",
"upper",
"(",
")",
"!=",
"'MR'",
":",
"return",
"False",
"if",
"'SIEMENS'",
"not",
"in",
"header",
".",
"Manufacturer",
".",
"upper",
"(",
")",
":",
"return",
"False",
"return",
"True"
]
| Use this function to detect if a dicom series is a siemens dataset
:param dicom_input: directory with dicom files for 1 scan | [
"Use",
"this",
"function",
"to",
"detect",
"if",
"a",
"dicom",
"series",
"is",
"a",
"siemens",
"dataset"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L126-L146 |
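Taken together, the four vendor predicates above follow the same pattern: read the first header, require an MR modality, then match the Manufacturer string. Below is a minimal routing sketch; the detect_vendor helper, the file-list argument and the fallback label are illustrative assumptions, while pydicom.dcmread and its stop_before_pixels flag are real pydicom API.

import pydicom

def detect_vendor(dicom_files):
    # read only the header of the first file; pixel data is not needed here
    header = pydicom.dcmread(dicom_files[0], stop_before_pixels=True)
    dicom_input = [header]
    # the manufacturer strings are mutually exclusive, so order is arbitrary
    for name, check in [('siemens', is_siemens), ('ge', is_ge),
                        ('philips', is_philips), ('hitachi', is_hitachi)]:
        if check(dicom_input):
            return name
    return 'generic'  # the predicates themselves fall back to generic conversion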
icometrix/dicom2nifti | dicom2nifti/common.py | is_multiframe_dicom | def is_multiframe_dicom(dicom_input):
"""
Use this function to detect if a dicom series is a siemens 4D dataset
NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory
(containing one series)
:param dicom_input: directory with dicom files for 1 scan
"""
# read dicom header
header = dicom_input[0]
if Tag(0x0002, 0x0002) not in header.file_meta:
return False
if header.file_meta[0x0002, 0x0002].value == '1.2.840.10008.5.1.4.1.1.4.1':
return True
return False | python | def is_multiframe_dicom(dicom_input):
header = dicom_input[0]
if Tag(0x0002, 0x0002) not in header.file_meta:
return False
if header.file_meta[0x0002, 0x0002].value == '1.2.840.10008.5.1.4.1.1.4.1':
return True
return False | [
"def",
"is_multiframe_dicom",
"(",
"dicom_input",
")",
":",
"# read dicom header",
"header",
"=",
"dicom_input",
"[",
"0",
"]",
"if",
"Tag",
"(",
"0x0002",
",",
"0x0002",
")",
"not",
"in",
"header",
".",
"file_meta",
":",
"return",
"False",
"if",
"header",
".",
"file_meta",
"[",
"0x0002",
",",
"0x0002",
"]",
".",
"value",
"==",
"'1.2.840.10008.5.1.4.1.1.4.1'",
":",
"return",
"True",
"return",
"False"
]
| Use this function to detect if a dicom series is a siemens 4D dataset
NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory
(containing one series)
:param dicom_input: directory with dicom files for 1 scan | [
"Use",
"this",
"function",
"to",
"detect",
"if",
"a",
"dicom",
"series",
"is",
"a",
"siemens",
"4D",
"dataset",
"NOTE",
":",
"Only",
"the",
"first",
"slice",
"will",
"be",
"checked",
"so",
"you",
"can",
"only",
"provide",
"an",
"already",
"sorted",
"dicom",
"directory",
"(",
"containing",
"one",
"series",
")"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L149-L164 |
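For context, Tag(0x0002, 0x0002) is MediaStorageSOPClassUID in the file meta header, and '1.2.840.10008.5.1.4.1.1.4.1' is the Enhanced MR Image Storage SOP class used by multi-frame MR files. A self-contained check with a synthetic dataset; the constructed objects are assumptions for demonstration, and FileMetaDataset needs a reasonably recent pydicom.

from pydicom.dataset import Dataset, FileMetaDataset

file_meta = FileMetaDataset()
file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.4.1'  # Enhanced MR
header = Dataset()
header.file_meta = file_meta  # attach the meta header as pydicom readers do

print(is_multiframe_dicom([header]))  # True: multi-frame SOP class matched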
icometrix/dicom2nifti | dicom2nifti/common.py | is_valid_imaging_dicom | def is_valid_imaging_dicom(dicom_header):
"""
Function will do some basic checks to see if this is a valid imaging dicom
"""
# if it is philips and multiframe dicom then we assume it is ok
try:
if is_philips([dicom_header]):
if is_multiframe_dicom([dicom_header]):
return True
if "SeriesInstanceUID" not in dicom_header:
return False
if "InstanceNumber" not in dicom_header:
return False
if "ImageOrientationPatient" not in dicom_header or len(dicom_header.ImageOrientationPatient) < 6:
return False
if "ImagePositionPatient" not in dicom_header or len(dicom_header.ImagePositionPatient) < 3:
return False
# for all others if there is image orientation patient we assume it is ok
if Tag(0x0020, 0x0037) not in dicom_header:
return False
return True
except (KeyError, AttributeError):
return False | python | def is_valid_imaging_dicom(dicom_header):
try:
if is_philips([dicom_header]):
if is_multiframe_dicom([dicom_header]):
return True
if "SeriesInstanceUID" not in dicom_header:
return False
if "InstanceNumber" not in dicom_header:
return False
if "ImageOrientationPatient" not in dicom_header or len(dicom_header.ImageOrientationPatient) < 6:
return False
if "ImagePositionPatient" not in dicom_header or len(dicom_header.ImagePositionPatient) < 3:
return False
if Tag(0x0020, 0x0037) not in dicom_header:
return False
return True
except (KeyError, AttributeError):
return False | [
"def",
"is_valid_imaging_dicom",
"(",
"dicom_header",
")",
":",
"# if it is philips and multiframe dicom then we assume it is ok",
"try",
":",
"if",
"is_philips",
"(",
"[",
"dicom_header",
"]",
")",
":",
"if",
"is_multiframe_dicom",
"(",
"[",
"dicom_header",
"]",
")",
":",
"return",
"True",
"if",
"\"SeriesInstanceUID\"",
"not",
"in",
"dicom_header",
":",
"return",
"False",
"if",
"\"InstanceNumber\"",
"not",
"in",
"dicom_header",
":",
"return",
"False",
"if",
"\"ImageOrientationPatient\"",
"not",
"in",
"dicom_header",
"or",
"len",
"(",
"dicom_header",
".",
"ImageOrientationPatient",
")",
"<",
"6",
":",
"return",
"False",
"if",
"\"ImagePositionPatient\"",
"not",
"in",
"dicom_header",
"or",
"len",
"(",
"dicom_header",
".",
"ImagePositionPatient",
")",
"<",
"3",
":",
"return",
"False",
"# for all others if there is image position patient we assume it is ok",
"if",
"Tag",
"(",
"0x0020",
",",
"0x0037",
")",
"not",
"in",
"dicom_header",
":",
"return",
"False",
"return",
"True",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"return",
"False"
]
| Function will do some basic checks to see if this is a valid imaging dicom | [
"Function",
"will",
"do",
"some",
"basic",
"checks",
"to",
"see",
"if",
"this",
"is",
"a",
"valid",
"imaging",
"dicom"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L167-L195 |
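A sketch of how this validator might be used to filter a directory before slices are sorted into a volume; the directory walk, the helper name and the decision to skip unreadable files are assumptions, with only pydicom.dcmread and the validator above taken as given.

import os
import pydicom
from pydicom.errors import InvalidDicomError

def collect_valid_slices(dicom_directory):
    slices = []
    for name in sorted(os.listdir(dicom_directory)):
        path = os.path.join(dicom_directory, name)
        try:
            header = pydicom.dcmread(path, stop_before_pixels=True)
        except InvalidDicomError:
            continue  # not a dicom file at all, skip it
        if is_valid_imaging_dicom(header):
            slices.append(header)
    return slices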
icometrix/dicom2nifti | dicom2nifti/common.py | get_volume_pixeldata | def get_volume_pixeldata(sorted_slices):
"""
the slope and intercept calculation can cause the slices to have different dtypes
we should get the correct dtype that can cover all of them
:type sorted_slices: list of slices
:param sorted_slices: slices sorted in the correct order to create the volume
"""
slices = []
combined_dtype = None
for slice_ in sorted_slices:
slice_data = _get_slice_pixeldata(slice_)
slice_data = slice_data[numpy.newaxis, :, :]
slices.append(slice_data)
if combined_dtype is None:
combined_dtype = slice_data.dtype
else:
combined_dtype = numpy.promote_types(combined_dtype, slice_data.dtype)
# create the new volume with the correct data
vol = numpy.concatenate(slices, axis=0)
# Done
vol = numpy.transpose(vol, (2, 1, 0))
return vol | python | def get_volume_pixeldata(sorted_slices):
slices = []
combined_dtype = None
for slice_ in sorted_slices:
slice_data = _get_slice_pixeldata(slice_)
slice_data = slice_data[numpy.newaxis, :, :]
slices.append(slice_data)
if combined_dtype is None:
combined_dtype = slice_data.dtype
else:
combined_dtype = numpy.promote_types(combined_dtype, slice_data.dtype)
vol = numpy.concatenate(slices, axis=0)
vol = numpy.transpose(vol, (2, 1, 0))
return vol | [
"def",
"get_volume_pixeldata",
"(",
"sorted_slices",
")",
":",
"slices",
"=",
"[",
"]",
"combined_dtype",
"=",
"None",
"for",
"slice_",
"in",
"sorted_slices",
":",
"slice_data",
"=",
"_get_slice_pixeldata",
"(",
"slice_",
")",
"slice_data",
"=",
"slice_data",
"[",
"numpy",
".",
"newaxis",
",",
":",
",",
":",
"]",
"slices",
".",
"append",
"(",
"slice_data",
")",
"if",
"combined_dtype",
"is",
"None",
":",
"combined_dtype",
"=",
"slice_data",
".",
"dtype",
"else",
":",
"combined_dtype",
"=",
"numpy",
".",
"promote_types",
"(",
"combined_dtype",
",",
"slice_data",
".",
"dtype",
")",
"# create the new volume with with the correct data",
"vol",
"=",
"numpy",
".",
"concatenate",
"(",
"slices",
",",
"axis",
"=",
"0",
")",
"# Done",
"vol",
"=",
"numpy",
".",
"transpose",
"(",
"vol",
",",
"(",
"2",
",",
"1",
",",
"0",
")",
")",
"return",
"vol"
]
| the slope and intercept calculation can cause the slices to have different dtypes
we should get the correct dtype that can cover all of them
:type sorted_slices: list of slices
:param sorted_slices: slices sorted in the correct order to create the volume | [
"the",
"slice",
"and",
"intercept",
"calculation",
"can",
"cause",
"the",
"slices",
"to",
"have",
"different",
"dtypes",
"we",
"should",
"get",
"the",
"correct",
"dtype",
"that",
"can",
"cover",
"all",
"of",
"them",
":",
"type",
"sorted_slices",
":",
"list",
"of",
"slices",
":",
"param",
"sorted_slices",
":",
"sliced",
"sored",
"in",
"the",
"correct",
"order",
"to",
"create",
"volume"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L198-L222 |
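Two details are worth spelling out: numpy.promote_types finds a dtype wide enough for every slice (scaling can leave some slices integer and others float), and numpy.concatenate performs the same promotion on mixed inputs, which is presumably why combined_dtype is never applied explicitly. A standalone illustration with synthetic slices; the shapes, values and dtypes are made up.

import numpy

slice_a = numpy.full((1, 4, 4), 7, dtype=numpy.int16)      # unscaled slice
slice_b = numpy.full((1, 4, 4), 2.5, dtype=numpy.float64)  # rescaled slice
print(numpy.promote_types(slice_a.dtype, slice_b.dtype))   # float64

vol = numpy.concatenate([slice_a, slice_b], axis=0)        # promoted to float64
vol = numpy.transpose(vol, (2, 1, 0))                      # (x, y, slice) order
print(vol.dtype, vol.shape)                                # float64 (4, 4, 2)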
icometrix/dicom2nifti | dicom2nifti/common.py | _get_slice_pixeldata | def _get_slice_pixeldata(dicom_slice):
"""
the slope and intercept calculation can cause the slices to have different dtypes
we should get the correct dtype that can cover all of them
:type dicom_slice: pydicom object
:param dicom_slice: slice to get the pixeldata for
"""
data = dicom_slice.pixel_array
# fix overflow issues for signed data where BitsStored is lower than BitsAllocated and PixelRepresentation = 1 (signed)
# for example a hitachi mri scan can have BitsAllocated 16 but BitsStored is 12 and HighBit 11
if dicom_slice.BitsAllocated != dicom_slice.BitsStored and \
dicom_slice.HighBit == dicom_slice.BitsStored - 1 and \
dicom_slice.PixelRepresentation == 1:
if dicom_slice.BitsAllocated == 16:
data = data.astype(numpy.int16) # assert that it is a signed type
max_value = pow(2, dicom_slice.HighBit) - 1
invert_value = -1 ^ max_value
data[data > max_value] = numpy.bitwise_or(data[data > max_value], invert_value)
pass
return apply_scaling(data, dicom_slice) | python | def _get_slice_pixeldata(dicom_slice):
data = dicom_slice.pixel_array
if dicom_slice.BitsAllocated != dicom_slice.BitsStored and \
dicom_slice.HighBit == dicom_slice.BitsStored - 1 and \
dicom_slice.PixelRepresentation == 1:
if dicom_slice.BitsAllocated == 16:
data = data.astype(numpy.int16)
max_value = pow(2, dicom_slice.HighBit) - 1
invert_value = -1 ^ max_value
data[data > max_value] = numpy.bitwise_or(data[data > max_value], invert_value)
pass
return apply_scaling(data, dicom_slice) | [
"def",
"_get_slice_pixeldata",
"(",
"dicom_slice",
")",
":",
"data",
"=",
"dicom_slice",
".",
"pixel_array",
"# fix overflow issues for signed data where BitsStored is lower than BitsAllocated and PixelReprentation = 1 (signed)",
"# for example a hitachi mri scan can have BitsAllocated 16 but BitsStored is 12 and HighBit 11",
"if",
"dicom_slice",
".",
"BitsAllocated",
"!=",
"dicom_slice",
".",
"BitsStored",
"and",
"dicom_slice",
".",
"HighBit",
"==",
"dicom_slice",
".",
"BitsStored",
"-",
"1",
"and",
"dicom_slice",
".",
"PixelRepresentation",
"==",
"1",
":",
"if",
"dicom_slice",
".",
"BitsAllocated",
"==",
"16",
":",
"data",
"=",
"data",
".",
"astype",
"(",
"numpy",
".",
"int16",
")",
"# assert that it is a signed type",
"max_value",
"=",
"pow",
"(",
"2",
",",
"dicom_slice",
".",
"HighBit",
")",
"-",
"1",
"invert_value",
"=",
"-",
"1",
"^",
"max_value",
"data",
"[",
"data",
">",
"max_value",
"]",
"=",
"numpy",
".",
"bitwise_or",
"(",
"data",
"[",
"data",
">",
"max_value",
"]",
",",
"invert_value",
")",
"pass",
"return",
"apply_scaling",
"(",
"data",
",",
"dicom_slice",
")"
]
| the slope and intercept calculation can cause the slices to have different dtypes
we should get the correct dtype that can cover all of them
:type dicom_slice: pydicom object
:param dicom_slice: slice to get the pixeldata for | [
"the",
"slice",
"and",
"intercept",
"calculation",
"can",
"cause",
"the",
"slices",
"to",
"have",
"different",
"dtypes",
"we",
"should",
"get",
"the",
"correct",
"dtype",
"that",
"can",
"cover",
"all",
"of",
"them"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L225-L245 |
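The overflow fix is a manual sign extension: with BitsStored = 12 inside a 16-bit word, any stored value above 2047 actually has the 12-bit sign bit set, so its upper four bits must be filled with ones to become a valid int16. The same arithmetic in isolation; the sample values are made up.

import numpy

high_bit = 11                          # BitsStored 12 -> sign bit at position 11
data = numpy.array([100, 3000], dtype=numpy.int16)

max_value = pow(2, high_bit) - 1       # 2047, largest positive 12-bit value
invert_value = -1 ^ max_value          # -2048: ones from bit 11 upward
data[data > max_value] = numpy.bitwise_or(data[data > max_value], invert_value)
print(data)                            # [  100 -1096]: 3000 is -1096 as signed 12-bit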
icometrix/dicom2nifti | dicom2nifti/common.py | get_numpy_type | def get_numpy_type(dicom_header):
"""
Make NumPy format code, e.g. "uint16", "int32" etc
from two pieces of info:
mosaic.PixelRepresentation -- 0 for unsigned, 1 for signed;
mosaic.BitsAllocated -- 8, 16, or 32
:param dicom_header: the read dicom file/headers
:returns: numpy format string
"""
format_string = '%sint%d' % (('u', '')[dicom_header.PixelRepresentation], dicom_header.BitsAllocated)
try:
numpy.dtype(format_string)
except TypeError:
raise TypeError("Data type not understood by NumPy: format='%s', PixelRepresentation=%d, BitsAllocated=%d" %
(format_string, dicom_header.PixelRepresentation, dicom_header.BitsAllocated))
return format_string | python | def get_numpy_type(dicom_header):
format_string = '%sint%d' % (('u', '')[dicom_header.PixelRepresentation], dicom_header.BitsAllocated)
try:
numpy.dtype(format_string)
except TypeError:
raise TypeError("Data type not understood by NumPy: format='%s', PixelRepresentation=%d, BitsAllocated=%d" %
(format_string, dicom_header.PixelRepresentation, dicom_header.BitsAllocated))
return format_string | [
"def",
"get_numpy_type",
"(",
"dicom_header",
")",
":",
"format_string",
"=",
"'%sint%d'",
"%",
"(",
"(",
"'u'",
",",
"''",
")",
"[",
"dicom_header",
".",
"PixelRepresentation",
"]",
",",
"dicom_header",
".",
"BitsAllocated",
")",
"try",
":",
"numpy",
".",
"dtype",
"(",
"format_string",
")",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"\"Data type not understood by NumPy: format='%s', PixelRepresentation=%d, BitsAllocated=%d\"",
"%",
"(",
"format_string",
",",
"dicom_header",
".",
"PixelRepresentation",
",",
"dicom_header",
".",
"BitsAllocated",
")",
")",
"return",
"format_string"
]
| Make NumPy format code, e.g. "uint16", "int32" etc
from two pieces of info:
mosaic.PixelRepresentation -- 0 for unsigned, 1 for signed;
mosaic.BitsAllocated -- 8, 16, or 32
:param dicom_header: the read dicom file/headers
:returns: numpy format string | [
"Make",
"NumPy",
"format",
"code",
"e",
".",
"g",
".",
"uint16",
"int32",
"etc",
"from",
"two",
"pieces",
"of",
"info",
":",
"mosaic",
".",
"PixelRepresentation",
"--",
"0",
"for",
"unsigned",
"1",
"for",
"signed",
";",
"mosaic",
".",
"BitsAllocated",
"--",
"8",
"16",
"or",
"32"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L260-L277 |
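The tuple-indexing idiom ('u', '')[PixelRepresentation] prepends 'u' only for unsigned data (PixelRepresentation 0). A quick check with a stand-in header; SimpleNamespace substitutes for a real pydicom dataset here purely for demonstration.

from types import SimpleNamespace
import numpy

fake_header = SimpleNamespace(PixelRepresentation=0, BitsAllocated=16)
print(get_numpy_type(fake_header))               # 'uint16'

fake_header.PixelRepresentation = 1              # switch to signed
print(numpy.dtype(get_numpy_type(fake_header)))  # dtype('int16')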
icometrix/dicom2nifti | dicom2nifti/common.py | get_fd_array_value | def get_fd_array_value(tag, count):
"""
Getters for data that also work with implicit transfersyntax
:param count: number of items in the array
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
values = []
for i in range(count):
start = i * 8
stop = (i + 1) * 8
values.append(struct.unpack('d', tag.value[start:stop])[0])
return numpy.array(values)
return tag.value | python | def get_fd_array_value(tag, count):
if tag.VR == 'OB' or tag.VR == 'UN':
values = []
for i in range(count):
start = i * 8
stop = (i + 1) * 8
values.append(struct.unpack('d', tag.value[start:stop])[0])
return numpy.array(values)
return tag.value | [
"def",
"get_fd_array_value",
"(",
"tag",
",",
"count",
")",
":",
"if",
"tag",
".",
"VR",
"==",
"'OB'",
"or",
"tag",
".",
"VR",
"==",
"'UN'",
":",
"values",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"count",
")",
":",
"start",
"=",
"i",
"*",
"8",
"stop",
"=",
"(",
"i",
"+",
"1",
")",
"*",
"8",
"values",
".",
"append",
"(",
"struct",
".",
"unpack",
"(",
"'d'",
",",
"tag",
".",
"value",
"[",
"start",
":",
"stop",
"]",
")",
"[",
"0",
"]",
")",
"return",
"numpy",
".",
"array",
"(",
"values",
")",
"return",
"tag",
".",
"value"
]
| Getters for data that also work with implicit transfersyntax
:param count: number of items in the array
:param tag: the tag to read | [
"Getters",
"for",
"data",
"that",
"also",
"work",
"with",
"implicit",
"transfersyntax"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L280-L294 |
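Under an implicit transfer syntax the VR degrades to 'OB' or 'UN' and an FD array arrives as raw bytes, 8 bytes per double, which is why the getter slices the value in steps of 8. A round-trip sketch using the standard library; the SimpleNamespace stand-in for a pydicom data element is an assumption, and the getter's numpy import comes from the module above.

import struct
from types import SimpleNamespace

raw = struct.pack('ddd', 1.0, 0.0, -1.0)      # three doubles, 24 bytes total
implicit = SimpleNamespace(VR='OB', value=raw)
print(get_fd_array_value(implicit, 3))        # [ 1.  0. -1.]

explicit = SimpleNamespace(VR='FD', value=[1.0, 0.0, -1.0])
print(get_fd_array_value(explicit, 3))        # already decoded, passed through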
icometrix/dicom2nifti | dicom2nifti/common.py | get_fd_value | def get_fd_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.unpack('d', tag.value)[0]
return value
return tag.value | python | def get_fd_value(tag):
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.unpack('d', tag.value)[0]
return value
return tag.value | [
"def",
"get_fd_value",
"(",
"tag",
")",
":",
"if",
"tag",
".",
"VR",
"==",
"'OB'",
"or",
"tag",
".",
"VR",
"==",
"'UN'",
":",
"value",
"=",
"struct",
".",
"unpack",
"(",
"'d'",
",",
"tag",
".",
"value",
")",
"[",
"0",
"]",
"return",
"value",
"return",
"tag",
".",
"value"
]
| Getters for data that also work with implicit transfersyntax
:param tag: the tag to read | [
"Getters",
"for",
"data",
"that",
"also",
"work",
"with",
"implicit",
"transfersyntax"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L297-L306 |
icometrix/dicom2nifti | dicom2nifti/common.py | set_fd_value | def set_fd_value(tag, value):
"""
Setters for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.pack('d', value)
tag.value = value | python | def set_fd_value(tag, value):
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.pack('d', value)
tag.value = value | [
"def",
"set_fd_value",
"(",
"tag",
",",
"value",
")",
":",
"if",
"tag",
".",
"VR",
"==",
"'OB'",
"or",
"tag",
".",
"VR",
"==",
"'UN'",
":",
"value",
"=",
"struct",
".",
"pack",
"(",
"'d'",
",",
"value",
")",
"tag",
".",
"value",
"=",
"value"
]
| Setters for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read | [
"Setters",
"for",
"data",
"that",
"also",
"work",
"with",
"implicit",
"transfersyntax"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L309-L318 |
icometrix/dicom2nifti | dicom2nifti/common.py | get_fl_value | def get_fl_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.unpack('f', tag.value)[0]
return value
return tag.value | python | def get_fl_value(tag):
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.unpack('f', tag.value)[0]
return value
return tag.value | [
"def",
"get_fl_value",
"(",
"tag",
")",
":",
"if",
"tag",
".",
"VR",
"==",
"'OB'",
"or",
"tag",
".",
"VR",
"==",
"'UN'",
":",
"value",
"=",
"struct",
".",
"unpack",
"(",
"'f'",
",",
"tag",
".",
"value",
")",
"[",
"0",
"]",
"return",
"value",
"return",
"tag",
".",
"value"
]
| Getters for data that also work with implicit transfersyntax
:param tag: the tag to read | [
"Getters",
"for",
"data",
"that",
"also",
"work",
"with",
"implicit",
"transfersyntax"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L321-L330 |
icometrix/dicom2nifti | dicom2nifti/common.py | get_is_value | def get_is_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
# data is int formatted as string so convert the string first and cast to int
if tag.VR == 'OB' or tag.VR == 'UN':
value = int(tag.value.decode("ascii").replace(" ", ""))
return value
return int(tag.value) | python | def get_is_value(tag):
if tag.VR == 'OB' or tag.VR == 'UN':
value = int(tag.value.decode("ascii").replace(" ", ""))
return value
return int(tag.value) | [
"def",
"get_is_value",
"(",
"tag",
")",
":",
"# data is int formatted as string so convert te string first and cast to int",
"if",
"tag",
".",
"VR",
"==",
"'OB'",
"or",
"tag",
".",
"VR",
"==",
"'UN'",
":",
"value",
"=",
"int",
"(",
"tag",
".",
"value",
".",
"decode",
"(",
"\"ascii\"",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
")",
"return",
"value",
"return",
"int",
"(",
"tag",
".",
"value",
")"
]
| Getters for data that also work with implicit transfersyntax
:param tag: the tag to read | [
"Getters",
"for",
"data",
"that",
"also",
"work",
"with",
"implicit",
"transfersyntax"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L333-L343 |
icometrix/dicom2nifti | dicom2nifti/common.py | get_ss_value | def get_ss_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
# data is a signed short packed as raw bytes so unpack it first
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.unpack('h', tag.value)[0]
return value
return tag.value | python | def get_ss_value(tag):
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.unpack('h', tag.value)[0]
return value
return tag.value | [
"def",
"get_ss_value",
"(",
"tag",
")",
":",
"# data is int formatted as string so convert te string first and cast to int",
"if",
"tag",
".",
"VR",
"==",
"'OB'",
"or",
"tag",
".",
"VR",
"==",
"'UN'",
":",
"value",
"=",
"struct",
".",
"unpack",
"(",
"'h'",
",",
"tag",
".",
"value",
")",
"[",
"0",
"]",
"return",
"value",
"return",
"tag",
".",
"value"
]
| Getters for data that also work with implicit transfersyntax
:param tag: the tag to read | [
"Getters",
"for",
"data",
"that",
"also",
"work",
"with",
"implicit",
"transfersyntax"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L346-L356 |
icometrix/dicom2nifti | dicom2nifti/common.py | set_ss_value | def set_ss_value(tag, value):
"""
Setter for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.pack('h', value)
tag.value = value | python | def set_ss_value(tag, value):
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.pack('h', value)
tag.value = value | [
"def",
"set_ss_value",
"(",
"tag",
",",
"value",
")",
":",
"if",
"tag",
".",
"VR",
"==",
"'OB'",
"or",
"tag",
".",
"VR",
"==",
"'UN'",
":",
"value",
"=",
"struct",
".",
"pack",
"(",
"'h'",
",",
"value",
")",
"tag",
".",
"value",
"=",
"value"
]
| Setter for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read | [
"Setter",
"for",
"data",
"that",
"also",
"work",
"with",
"implicit",
"transfersyntax"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L359-L368 |
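The SS getter and setter mirror each other through struct's native-order 'h' (signed 16-bit) format, so a set followed by a get is lossless for in-range values. A round trip with a stand-in element, which is an assumption for demonstration.

import struct
from types import SimpleNamespace

tag = SimpleNamespace(VR='OB', value=b'\x00\x00')
set_ss_value(tag, -12)          # packs -12 into two native-endian bytes
print(tag.value)                # b'\xf4\xff' on a little-endian machine
print(get_ss_value(tag))        # -12, recovered by the matching getter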
icometrix/dicom2nifti | dicom2nifti/common.py | apply_scaling | def apply_scaling(data, dicom_headers):
"""
Rescale the data based on the RescaleSlope and RescaleIntercept
Based on the scaling from pydicomseries
:param dicom_headers: dicom headers to use to retrieve the scaling factors
:param data: the input data
"""
# Apply the rescaling if needed
private_scale_slope_tag = Tag(0x2005, 0x100E)
private_scale_intercept_tag = Tag(0x2005, 0x100D)
if 'RescaleSlope' in dicom_headers or 'RescaleIntercept' in dicom_headers \
or private_scale_slope_tag in dicom_headers or private_scale_intercept_tag in dicom_headers:
rescale_slope = 1
rescale_intercept = 0
if 'RescaleSlope' in dicom_headers:
rescale_slope = dicom_headers.RescaleSlope
if 'RescaleIntercept' in dicom_headers:
rescale_intercept = dicom_headers.RescaleIntercept
# try:
# # this section can sometimes fail due to unknown private fields
# if private_scale_slope_tag in dicom_headers:
# private_scale_slope = float(dicom_headers[private_scale_slope_tag].value)
# if private_scale_intercept_tag in dicom_headers:
# private_scale_intercept = float(dicom_headers[private_scale_intercept_tag].value)
# except:
# pass
return do_scaling(data, rescale_slope, rescale_intercept)
else:
return data | python | def apply_scaling(data, dicom_headers):
private_scale_slope_tag = Tag(0x2005, 0x100E)
private_scale_intercept_tag = Tag(0x2005, 0x100D)
if 'RescaleSlope' in dicom_headers or 'RescaleIntercept' in dicom_headers \
or private_scale_slope_tag in dicom_headers or private_scale_intercept_tag in dicom_headers:
rescale_slope = 1
rescale_intercept = 0
if 'RescaleSlope' in dicom_headers:
rescale_slope = dicom_headers.RescaleSlope
if 'RescaleIntercept' in dicom_headers:
rescale_intercept = dicom_headers.RescaleIntercept
return do_scaling(data, rescale_slope, rescale_intercept)
else:
return data | [
"def",
"apply_scaling",
"(",
"data",
",",
"dicom_headers",
")",
":",
"# Apply the rescaling if needed",
"private_scale_slope_tag",
"=",
"Tag",
"(",
"0x2005",
",",
"0x100E",
")",
"private_scale_intercept_tag",
"=",
"Tag",
"(",
"0x2005",
",",
"0x100D",
")",
"if",
"'RescaleSlope'",
"in",
"dicom_headers",
"or",
"'RescaleIntercept'",
"in",
"dicom_headers",
"or",
"private_scale_slope_tag",
"in",
"dicom_headers",
"or",
"private_scale_intercept_tag",
"in",
"dicom_headers",
":",
"rescale_slope",
"=",
"1",
"rescale_intercept",
"=",
"0",
"if",
"'RescaleSlope'",
"in",
"dicom_headers",
":",
"rescale_slope",
"=",
"dicom_headers",
".",
"RescaleSlope",
"if",
"'RescaleIntercept'",
"in",
"dicom_headers",
":",
"rescale_intercept",
"=",
"dicom_headers",
".",
"RescaleIntercept",
"# try:",
"# # this section can sometimes fail due to unknown private fields",
"# if private_scale_slope_tag in dicom_headers:",
"# private_scale_slope = float(dicom_headers[private_scale_slope_tag].value)",
"# if private_scale_slope_tag in dicom_headers:",
"# private_scale_slope = float(dicom_headers[private_scale_slope_tag].value)",
"# except:",
"# pass",
"return",
"do_scaling",
"(",
"data",
",",
"rescale_slope",
",",
"rescale_intercept",
")",
"else",
":",
"return",
"data"
]
| Rescale the data based on the RescaleSlope and RescaleIntercept
Based on the scaling from pydicomseries
:param dicom_headers: dicom headers to use to retrieve the scaling factors
:param data: the input data | [
"Rescale",
"the",
"data",
"based",
"on",
"the",
"RescaleSlope",
"and",
"RescaleOffset",
"Based",
"on",
"the",
"scaling",
"from",
"pydicomseries"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L371-L401 |
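The rescaling delegated to do_scaling (defined elsewhere in this module) is the affine map stored_value * RescaleSlope + RescaleIntercept. A direct numpy sketch of that arithmetic with typical CT values; the slope/intercept pair is an assumption for illustration, and the exact dtype handling in do_scaling may differ.

import numpy

stored = numpy.array([0, 1024, 2048], dtype=numpy.uint16)
rescale_slope, rescale_intercept = 1.0, -1024.0   # common CT scaling (assumed)
hounsfield = stored * rescale_slope + rescale_intercept
print(hounsfield)                                 # [-1024.     0.  1024.]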
icometrix/dicom2nifti | dicom2nifti/common.py | write_bvec_file | def write_bvec_file(bvecs, bvec_file):
"""
Write an array of bvecs to a bvec file
:param bvecs: array with the vectors
:param bvec_file: filepath to write to
"""
if bvec_file is None:
return
logger.info('Saving BVEC file: %s' % bvec_file)
with open(bvec_file, 'w') as text_file:
# Map each direction to string, join them using a space and write to the file
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 0])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 1])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 2]))) | python | def write_bvec_file(bvecs, bvec_file):
if bvec_file is None:
return
logger.info('Saving BVEC file: %s' % bvec_file)
with open(bvec_file, 'w') as text_file:
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 0])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 1])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 2]))) | [
"def",
"write_bvec_file",
"(",
"bvecs",
",",
"bvec_file",
")",
":",
"if",
"bvec_file",
"is",
"None",
":",
"return",
"logger",
".",
"info",
"(",
"'Saving BVEC file: %s'",
"%",
"bvec_file",
")",
"with",
"open",
"(",
"bvec_file",
",",
"'w'",
")",
"as",
"text_file",
":",
"# Map a dicection to string join them using a space and write to the file",
"text_file",
".",
"write",
"(",
"'%s\\n'",
"%",
"' '",
".",
"join",
"(",
"map",
"(",
"str",
",",
"bvecs",
"[",
":",
",",
"0",
"]",
")",
")",
")",
"text_file",
".",
"write",
"(",
"'%s\\n'",
"%",
"' '",
".",
"join",
"(",
"map",
"(",
"str",
",",
"bvecs",
"[",
":",
",",
"1",
"]",
")",
")",
")",
"text_file",
".",
"write",
"(",
"'%s\\n'",
"%",
"' '",
".",
"join",
"(",
"map",
"(",
"str",
",",
"bvecs",
"[",
":",
",",
"2",
"]",
")",
")",
")"
]
| Write an array of bvecs to a bvec file
:param bvecs: array with the vectors
:param bvec_file: filepath to write to | [
"Write",
"an",
"array",
"of",
"bvecs",
"to",
"a",
"bvec",
"file"
]
| train | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L478-L492 |
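The writer expects an N x 3 array and emits three lines, one per axis, with one column per diffusion volume, which is the FSL bvec layout. A small usage sketch; the vectors and output path are made up, and the module-level logger is assumed to be configured.

import numpy

bvecs = numpy.array([[0.0, 0.0, 0.0],    # b0 volume, no direction
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0],
                     [0.0, 0.0, 1.0]])
write_bvec_file(bvecs, '/tmp/example.bvec')
# resulting file, one row per axis (x, y, z):
# 0.0 1.0 0.0 0.0
# 0.0 0.0 1.0 0.0
# 0.0 0.0 0.0 1.0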