body_hash (stringlengths 64-64) | body (stringlengths 23-109k) | docstring (stringlengths 1-57k) | path (stringlengths 4-198) | name (stringlengths 1-115) | repository_name (stringlengths 7-111) | repository_stars (float64 0-191k) | lang (stringclasses 1 value) | body_without_docstring (stringlengths 14-108k) | unified (stringlengths 45-133k) |
---|---|---|---|---|---|---|---|---|---|
1983e37af70d8fc444ba6ac2e2b09676b6ecd438730eb6ac549eb49d46ee346c | def preorder_print(self, start, traversal):
'\n Prints the nodes using a preorder traversal.\n '
if start:
traversal += (str(start.value) + '-')
traversal = self.preorder_print(start.left, traversal)
traversal = self.preorder_print(start.right, traversal)
return traversal | Prints the nodes using a preorder traversal. | containers/BinaryTree.py | preorder_print | vbopardi/Week8Containers | 0 | python | def preorder_print(self, start, traversal):
'\n \n '
if start:
traversal += (str(start.value) + '-')
traversal = self.preorder_print(start.left, traversal)
traversal = self.preorder_print(start.right, traversal)
return traversal | def preorder_print(self, start, traversal):
'\n \n '
if start:
traversal += (str(start.value) + '-')
traversal = self.preorder_print(start.left, traversal)
traversal = self.preorder_print(start.right, traversal)
return traversal<|docstring|>Prints the nodes using a preorder traversal.<|endoftext|> |
d606df99a43109b57fdb3d68b0e905c47b4349dad16b90ad62ecfdea1cc66983 | def inorder_print(self, start, traversal):
'\n Prints the nodes using an inorder traversal.\n '
if start:
traversal = self.inorder_print(start.left, traversal)
traversal += (str(start.value) + '-')
traversal = self.inorder_print(start.right, traversal)
return traversal | Prints the nodes using an inorder traversal. | containers/BinaryTree.py | inorder_print | vbopardi/Week8Containers | 0 | python | def inorder_print(self, start, traversal):
'\n \n '
if start:
traversal = self.inorder_print(start.left, traversal)
traversal += (str(start.value) + '-')
traversal = self.inorder_print(start.right, traversal)
return traversal | def inorder_print(self, start, traversal):
'\n \n '
if start:
traversal = self.inorder_print(start.left, traversal)
traversal += (str(start.value) + '-')
traversal = self.inorder_print(start.right, traversal)
return traversal<|docstring|>Prints the nodes using an inorder traversal.<|endoftext|> |
1017d917397cfccc7e32bfdcd79edd750948430950e53a00bcb116989155bca3 | def postorder_print(self, start, traversal):
'\n Prints the nodes using a postorder traversal.\n '
if start:
traversal = self.postorder_print(start.left, traversal)
traversal = self.postorder_print(start.right, traversal)
traversal += (str(start.value) + '-')
return traversal | Prints the nodes using a postorder traversal. | containers/BinaryTree.py | postorder_print | vbopardi/Week8Containers | 0 | python | def postorder_print(self, start, traversal):
'\n \n '
if start:
traversal = self.postorder_print(start.left, traversal)
traversal = self.postorder_print(start.right, traversal)
traversal += (str(start.value) + '-')
return traversal | def postorder_print(self, start, traversal):
'\n \n '
if start:
traversal = self.postorder_print(start.left, traversal)
traversal = self.postorder_print(start.right, traversal)
traversal += (str(start.value) + '-')
return traversal<|docstring|>Prints the nodes using a postorder traversal.<|endoftext|> |
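The three traversal rows above share one accumulator-string pattern: visit the node and recurse into the subtrees, in different orders. A minimal sketch of how they could be exercised; the `Node` and `BinaryTree` shells here are assumed scaffolding, since the rows only show the methods (only `preorder_print` is copied from the row above):

```python
# Hypothetical scaffolding: Node/BinaryTree shells are assumptions.
class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None


class BinaryTree:
    def __init__(self, root_value):
        self.root = Node(root_value)

    def preorder_print(self, start, traversal):
        # visit root, then left subtree, then right subtree
        if start:
            traversal += str(start.value) + '-'
            traversal = self.preorder_print(start.left, traversal)
            traversal = self.preorder_print(start.right, traversal)
        return traversal


tree = BinaryTree(1)
tree.root.left, tree.root.right = Node(2), Node(3)
tree.root.left.left = Node(4)
print(tree.preorder_print(tree.root, ''))  # 1-2-4-3-
```

Because Python strings are immutable, the `traversal = self.preorder_print(...)` reassignment is essential here; dropping it would silently lose the recursive results.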
2c092957bef3bfbee51f6eba9006927a481a7471f4cc37789534dce4b3c36434 | def to_list(self, traversal_type):
'\n This function is similar to the print_tree function,\n but instead of printing the tree,\n it returns the contents of the tree as a list.\n\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if (traversal_type == 'preorder'):
return self.preorder(self.root, [])
elif (traversal_type == 'inorder'):
return self.inorder(self.root, [])
elif (traversal_type == 'postorder'):
return self.postorder(self.root, [])
else:
tt = str(traversal_type)
raise ValueError((('Traversal type ' + tt) + ' is not supported.')) | This function is similar to the print_tree function,
but instead of printing the tree,
it returns the contents of the tree as a list.
FIXME:
Implement this function by modifying the _print functions above. | containers/BinaryTree.py | to_list | vbopardi/Week8Containers | 0 | python | def to_list(self, traversal_type):
'\n This function is similar to the print_tree function,\n but instead of printing the tree,\n it returns the contents of the tree as a list.\n\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if (traversal_type == 'preorder'):
return self.preorder(self.root, [])
elif (traversal_type == 'inorder'):
return self.inorder(self.root, [])
elif (traversal_type == 'postorder'):
return self.postorder(self.root, [])
else:
tt = str(traversal_type)
raise ValueError((('Traversal type ' + tt) + ' is not supported.')) | def to_list(self, traversal_type):
'\n This function is similar to the print_tree function,\n but instead of printing the tree,\n it returns the contents of the tree as a list.\n\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if (traversal_type == 'preorder'):
return self.preorder(self.root, [])
elif (traversal_type == 'inorder'):
return self.inorder(self.root, [])
elif (traversal_type == 'postorder'):
return self.postorder(self.root, [])
else:
tt = str(traversal_type)
raise ValueError((('Traversal type ' + tt) + ' is not supported.'))<|docstring|>This function is similar to the print_tree function,
but instead of printing the tree,
it returns the contents of the tree as a list.
FIXME:
Implement this function by modifying the _print functions above.<|endoftext|> |
abb28bdd89117a042dd08d3c051c2945c0acad5af7ec353158c98b2e7faee828 | def preorder(self, start, traversal):
'\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if start:
traversal.append(start.value)
traversal = self.preorder(start.left, traversal)
traversal = self.preorder(start.right, traversal)
return traversal | FIXME:
Implement this function by modifying the _print functions above. | containers/BinaryTree.py | preorder | vbopardi/Week8Containers | 0 | python | def preorder(self, start, traversal):
'\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if start:
traversal.append(start.value)
traversal = self.preorder(start.left, traversal)
traversal = self.preorder(start.right, traversal)
return traversal | def preorder(self, start, traversal):
'\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if start:
traversal.append(start.value)
traversal = self.preorder(start.left, traversal)
traversal = self.preorder(start.right, traversal)
return traversal<|docstring|>FIXME:
Implement this function by modifying the _print functions above.<|endoftext|> |
12aa408d85e6bd43bb0ade9e5b579738962b429f4d9b2b29a88e6e82814b3a55 | def inorder(self, start, traversal):
'\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if start:
traversal = self.inorder(start.left, traversal)
traversal.append(start.value)
traversal = self.inorder(start.right, traversal)
return traversal | FIXME:
Implement this function by modifying the _print functions above. | containers/BinaryTree.py | inorder | vbopardi/Week8Containers | 0 | python | def inorder(self, start, traversal):
'\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if start:
traversal = self.inorder(start.left, traversal)
traversal.append(start.value)
traversal = self.inorder(start.right, traversal)
return traversal | def inorder(self, start, traversal):
'\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if start:
traversal = self.inorder(start.left, traversal)
traversal.append(start.value)
traversal = self.inorder(start.right, traversal)
return traversal<|docstring|>FIXME:
Implement this function by modifying the _print functions above.<|endoftext|> |
92fe47bb116b84679cd371c77d96e7926a4f982fdef1d028c74902001ed8197a | def postorder(self, start, traversal):
'\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if start:
traversal = self.postorder(start.left, traversal)
traversal = self.postorder(start.right, traversal)
traversal.append(start.value)
return traversal | FIXME:
Implement this function by modifying the _print functions above. | containers/BinaryTree.py | postorder | vbopardi/Week8Containers | 0 | python | def postorder(self, start, traversal):
'\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if start:
traversal = self.postorder(start.left, traversal)
traversal = self.postorder(start.right, traversal)
traversal.append(start.value)
return traversal | def postorder(self, start, traversal):
'\n FIXME:\n Implement this function by modifying the _print functions above.\n '
if start:
traversal = self.postorder(start.left, traversal)
traversal = self.postorder(start.right, traversal)
traversal.append(start.value)
return traversal<|docstring|>FIXME:
Implement this function by modifying the _print functions above.<|endoftext|> |
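The list-returning traversals above mutate a shared list rather than rebuilding a string, which a compact check makes visible (the `Node` helper is again an assumption, not part of the rows):

```python
class Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right


def inorder(start, traversal):
    # left subtree, then root, then right subtree
    if start:
        inorder(start.left, traversal)
        traversal.append(start.value)
        inorder(start.right, traversal)
    return traversal


root = Node(2, Node(1), Node(3))
assert inorder(root, []) == [1, 2, 3]
```

Unlike the string versions, the list is mutated in place, so the `traversal = self.inorder(...)` reassignments in the rows above are harmless but redundant.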
72818589dfbe60ffe3a6b970b5beead6c5badcbd010027b38e423216ad46df43 | def __len__(self):
'\n Returns the number of elements contained in the tree.\n Recall that `len(tree)` will desugar to `tree.__len__()`.\n '
return BinaryTree.__len__helper(self.root) | Returns the number of elements contained in the tree.
Recall that `len(tree)` will desugar to `tree.__len__()`. | containers/BinaryTree.py | __len__ | vbopardi/Week8Containers | 0 | python | def __len__(self):
'\n Returns the number of elements contained in the tree.\n Recall that `len(tree)` will desugar to `tree.__len__()`.\n '
return BinaryTree.__len__helper(self.root) | def __len__(self):
'\n Returns the number of elements contained in the tree.\n Recall that `len(tree)` will desugar to `tree.__len__()`.\n '
return BinaryTree.__len__helper(self.root)<|docstring|>Returns the number of elements contained in the tree.
Recall that `len(tree)` will desugar to `tree.__len__()`.<|endoftext|> |
2647cd6b0cf491361d453e233b6e5f4de79b6b7fc1f2a2405902998995230a66 | @staticmethod
def __len__helper(node):
'\n FIXME:\n Implement this function.\n\n HINT:\n The pseudocode is:\n add 1 for the current node;\n recurse into the left child;\n recurse into the right child;\n return the sum of these three steps\n '
if (not node):
return 0
length = 1
if node.left:
length += BinaryTree.__len__helper(node.left)
if node.right:
length += BinaryTree.__len__helper(node.right)
return length | FIXME:
Implement this function.
HINT:
The pseudocode is:
add 1 for the current node;
recurse into the left child;
recurse into the right child;
return the sum of these three steps | containers/BinaryTree.py | __len__helper | vbopardi/Week8Containers | 0 | python | @staticmethod
def __len__helper(node):
'\n FIXME:\n Implement this function.\n\n HINT:\n The pseudocode is:\n add 1 for the current node;\n recurse into the left child;\n recurse into the right child;\n return the sum of these three steps\n '
if (not node):
return 0
length = 1
if node.left:
length += BinaryTree.__len__helper(node.left)
if node.right:
length += BinaryTree.__len__helper(node.right)
return length | @staticmethod
def __len__helper(node):
'\n FIXME:\n Implement this function.\n\n HINT:\n The pseudocode is:\n add 1 for the current node;\n recurse into the left child;\n recurse into the right child;\n return the sum of these three steps\n '
if (not node):
return 0
length = 1
if node.left:
length += BinaryTree.__len__helper(node.left)
if node.right:
length += BinaryTree.__len__helper(node.right)
return length<|docstring|>FIXME:
Implement this function.
HINT:
The pseudocode is:
add 1 for the current node;
recurse into the left child;
recurse into the right child;
return the sum of these three steps<|endoftext|> |
fee45e88cb5c960b76bd98672f0403b81b40106e108ed63c59143a45faebb87d | def height(self):
'\n Returns the height of the tree.\n\n FIXME:\n Implement this function.\n\n HINT:\n See how the __len__ method calls its helper staticmethod.\n '
return BinaryTree._height(self.root) | Returns the height of the tree.
FIXME:
Implement this function.
HINT:
See how the __len__ method calls its helper staticmethod. | containers/BinaryTree.py | height | vbopardi/Week8Containers | 0 | python | def height(self):
'\n Returns the height of the tree.\n\n FIXME:\n Implement this function.\n\n HINT:\n See how the __len__ method calls its helper staticmethod.\n '
return BinaryTree._height(self.root) | def height(self):
'\n Returns the height of the tree.\n\n FIXME:\n Implement this function.\n\n HINT:\n See how the __len__ method calls its helper staticmethod.\n '
return BinaryTree._height(self.root)<|docstring|>Returns the height of the tree.
FIXME:
Implement this function.
HINT:
See how the __len__ method calls its helper staticmethod.<|endoftext|> |
2f083861d837de503db33e40eeaf6fc737af197c4731e48133903340deaae6f0 | @staticmethod
def _height(node):
'\n FIXME:\n Implement this function.\n\n HINT:\n The pseudocode is:\n if a left child exists, calculate the _height of the left child;\n if a right child exists, calculate the _height of the right child;\n '
if (not node):
return (- 1)
else:
height_l = BinaryTree._height(node.left)
height_r = BinaryTree._height(node.right)
return (1 + max(height_l, height_r)) | FIXME:
Implement this function.
HINT:
The pseudocode is:
if a left child exists, calculate the _height of the left child;
if a right child exists, calculate the _height of the right child; | containers/BinaryTree.py | _height | vbopardi/Week8Containers | 0 | python | @staticmethod
def _height(node):
'\n FIXME:\n Implement this function.\n\n HINT:\n The pseudocode is:\n if a left child exists, calculate the _height of the left child;\n if a right child exists, calculate the _height of the right child;\n '
if (not node):
return (- 1)
else:
height_l = BinaryTree._height(node.left)
height_r = BinaryTree._height(node.right)
return (1 + max(height_l, height_r)) | @staticmethod
def _height(node):
'\n FIXME:\n Implement this function.\n\n HINT:\n The pseudocode is:\n if a left child exists, calculate the _height of the left child;\n if a right child exists, calculate the _height of the right child;\n '
if (not node):
return (- 1)
else:
height_l = BinaryTree._height(node.left)
height_r = BinaryTree._height(node.right)
return (1 + max(height_l, height_r))<|docstring|>FIXME:
Implement this function.
HINT:
The pseudocode is:
if a left child exists, calculate the _height of the left child;
if a right child exists, calculate the _height of the right child;<|endoftext|> |
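Together, `__len__helper` and `_height` above encode two conventions worth making explicit: every node counts toward the length, and an empty tree has height -1 (so a single node has height 0). A standalone sketch of both recurrences, with an assumed `Node`:

```python
class Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right


def size(node):
    # one for this node plus the sizes of both subtrees
    return 0 if node is None else 1 + size(node.left) + size(node.right)


def height(node):
    # empty tree: -1, so a leaf comes out as 0
    return -1 if node is None else 1 + max(height(node.left), height(node.right))


root = Node(5, Node(3, Node(2)), Node(8))
assert size(root) == 4
assert height(None) == -1 and height(Node(1)) == 0 and height(root) == 2
```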
c6e2b6aa29414ee9927616c0df6f5b5fbca2b936d1cb71b30a04bad7f578a090 | def register_visualisation(self, widget, module, label='Visualisation', glue=None):
'\n called from notebook\n\n if module_class is None and module_id is None:\n raise ValueError("One and only one of \'module_class\' and \'module_id\' args must be defined")\n if not(module_class is None or module_id is None):\n raise ValueError("One and only one of \'module_class\' and \'module_id\' args must be defined")\n '
linkable = hasattr(widget, 'link_module')
if ((not linkable) and (glue is None)):
raise ValueError("Registering a visualisation requires a linkable widget (i.e. which implements the 'link_module' interface) or 'glue' arg to be provides with a valid 'glue' function")
if (glue is not None):
self.other_coros += glue(widget, module)
else:
self.other_coros += widget.link_module(module, refresh=False)
self.vis_register[module.name].append((widget, label)) | called from notebook
if module_class is None and module_id is None:
raise ValueError("One and only one of 'module_class' and 'module_id' args must be defined")
if not(module_class is None or module_id is None):
raise ValueError("One and only one of 'module_class' and 'module_id' args must be defined") | widgets/progressivis_nb_widgets/nbwidgets/psboard.py | register_visualisation | jdfekete/progressivis | 51 | python | def register_visualisation(self, widget, module, label='Visualisation', glue=None):
'\n called from notebook\n\n if module_class is None and module_id is None:\n raise ValueError("One and only one of \'module_class\' and \'module_id\' args must be defined")\n if not(module_class is None or module_id is None):\n raise ValueError("One and only one of \'module_class\' and \'module_id\' args must be defined")\n '
linkable = hasattr(widget, 'link_module')
if ((not linkable) and (glue is None)):
raise ValueError("Registering a visualisation requires a linkable widget (i.e. which implements the 'link_module' interface) or 'glue' arg to be provides with a valid 'glue' function")
if (glue is not None):
self.other_coros += glue(widget, module)
else:
self.other_coros += widget.link_module(module, refresh=False)
self.vis_register[module.name].append((widget, label)) | def register_visualisation(self, widget, module, label='Visualisation', glue=None):
'\n called from notebook\n\n if module_class is None and module_id is None:\n raise ValueError("One and only one of \'module_class\' and \'module_id\' args must be defined")\n if not(module_class is None or module_id is None):\n raise ValueError("One and only one of \'module_class\' and \'module_id\' args must be defined")\n '
linkable = hasattr(widget, 'link_module')
if ((not linkable) and (glue is None)):
raise ValueError("Registering a visualisation requires a linkable widget (i.e. which implements the 'link_module' interface) or 'glue' arg to be provides with a valid 'glue' function")
if (glue is not None):
self.other_coros += glue(widget, module)
else:
self.other_coros += widget.link_module(module, refresh=False)
self.vis_register[module.name].append((widget, label))<|docstring|>called from notebook
if module_class is None and module_id is None:
raise ValueError("One and only one of 'module_class' and 'module_id' args must be defined")
if not(module_class is None or module_id is None):
raise ValueError("One and only one of 'module_class' and 'module_id' args must be defined")<|endoftext|> |
5bf54f3770c0964a1ed8be38b305175aa43b848ad6528c789cb82394aa9ac451 | @log_start_end(log=logger)
def sec_filings(ticker: str, num: int, export: str):
'Display SEC filings for a given stock ticker. [Source: Market Watch]\n\n Parameters\n ----------\n ticker : str\n Stock ticker\n num : int\n Number of ratings to display\n export : str\n Export dataframe data to csv,json,xlsx file\n '
df_financials = marketwatch_model.get_sec_filings(ticker)
print_rich_table(df_financials.head(num), headers=list(df_financials.columns), show_index=True, title='SEC Filings')
export_data(export, os.path.dirname(os.path.abspath(__file__)), 'sec', df_financials) | Display SEC filings for a given stock ticker. [Source: Market Watch]
Parameters
----------
ticker : str
Stock ticker
num : int
Number of ratings to display
export : str
Export dataframe data to csv,json,xlsx file | openbb_terminal/stocks/due_diligence/marketwatch_view.py | sec_filings | jmaslek/OpenBBTerminal | 255 | python | @log_start_end(log=logger)
def sec_filings(ticker: str, num: int, export: str):
'Display SEC filings for a given stock ticker. [Source: Market Watch]\n\n Parameters\n ----------\n ticker : str\n Stock ticker\n num : int\n Number of ratings to display\n export : str\n Export dataframe data to csv,json,xlsx file\n '
df_financials = marketwatch_model.get_sec_filings(ticker)
print_rich_table(df_financials.head(num), headers=list(df_financials.columns), show_index=True, title='SEC Filings')
export_data(export, os.path.dirname(os.path.abspath(__file__)), 'sec', df_financials) | @log_start_end(log=logger)
def sec_filings(ticker: str, num: int, export: str):
'Display SEC filings for a given stock ticker. [Source: Market Watch]\n\n Parameters\n ----------\n ticker : str\n Stock ticker\n num : int\n Number of ratings to display\n export : str\n Export dataframe data to csv,json,xlsx file\n '
df_financials = marketwatch_model.get_sec_filings(ticker)
print_rich_table(df_financials.head(num), headers=list(df_financials.columns), show_index=True, title='SEC Filings')
export_data(export, os.path.dirname(os.path.abspath(__file__)), 'sec', df_financials)<|docstring|>Display SEC filings for a given stock ticker. [Source: Market Watch]
Parameters
----------
ticker : str
Stock ticker
num : int
Number of ratings to display
export : str
Export dataframe data to csv,json,xlsx file<|endoftext|> |
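The `sec_filings` view above follows a fetch / render / export shape; `print_rich_table` and `export_data` are OpenBB helpers not shown in this dump, so this pandas-only sketch stubs that shape with standard-library rendering (the function and file names here are inventions for illustration):

```python
import pandas as pd


def show_and_export(df: pd.DataFrame, num: int, export: str) -> None:
    # render the first `num` rows, then optionally persist the full frame
    print(df.head(num).to_string())
    if export == 'csv':
        df.to_csv('sec_filings.csv')


show_and_export(pd.DataFrame({'type': ['10-K', '8-K']}), 1, 'csv')
```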
181d0a07cfa6ec91870822c7ae19c99a6bda9ebe6075f9b9606a095cbc4cf70b | def __init__(self):
'\n Construct a PikaStack object.\n '
self.elements = [] | Construct a PikaStack object. | pikapy/stack.py | __init__ | DanyGLewin/pykachu | 0 | python | def __init__(self):
'\n \n '
self.elements = [] | def __init__(self):
'\n \n '
self.elements = []<|docstring|>Construct a PikaStack object.<|endoftext|> |
ce798bcc98c1690df750b31bcb5c1bc3282c9b53a48c62950dfae7e2f661713d | def ADD(self):
'\n Add the top two elements on the stack.\n\n Adds the top two elements on the stack and pushes the result back onto \n the stack.\n \n Error handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of\n the stack is equal to the current top.\n '
a = self.POP()
b = self.POP()
c = (a + b)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c) | Add the top two elements on the stack.
Adds the top two elements on the stack and pushes the result back onto
the stack.
Error handling:
If the stack is empty, nothing happens.
If the stack only has a single element, the result pushed to the top of
the stack is equal to the current top. | pikapy/stack.py | ADD | DanyGLewin/pykachu | 0 | python | def ADD(self):
'\n Add the top two elements on the stack.\n\n Adds the top two elements on the stack and pushes the result back onto \n the stack.\n \n Error handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of\n the stack is equal to the current top.\n '
a = self.POP()
b = self.POP()
c = (a + b)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c) | def ADD(self):
'\n Add the top two elements on the stack.\n\n Adds the top two elements on the stack and pushes the result back onto \n the stack.\n \n Error handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of\n the stack is equal to the current top.\n '
a = self.POP()
b = self.POP()
c = (a + b)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c)<|docstring|>Add the top two elements on the stack.
Adds the top two elements on the stack and pushes the result back onto
the stack.
Error handling:
If the stack is empty, nothing happens.
If the stack only has a single element, the result pushed to the top of
the stack is equal to the current top.<|endoftext|> |
24c3a20b40cf38fd98e56e54521d63c9d84aed41afd2279b03a8339f237f415b | def SUB(self):
'\n Subtracts the top two elements.\n \n Subtracts the first element on the stack from the second element and\n pushes the result back onto the stack.\n\n Error Handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of\n the stack is -top\n '
a = self.POP()
b = self.POP()
c = (b - a)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c) | Subtracts the top two elements.
Subtracts the first element on the stack from the second element and
pushes the result back onto the stack.
Error Handling:
If the stack is empty, nothing happens.
If the stack only has a single element, the result pushed to the top of
the stack is -top | pikapy/stack.py | SUB | DanyGLewin/pykachu | 0 | python | def SUB(self):
'\n Subtracts the top two elements.\n \n Subtracts the first element on the stack from the second element and\n pushes the result back onto the stack.\n\n Error Handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of\n the stack is -top\n '
a = self.POP()
b = self.POP()
c = (b - a)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c) | def SUB(self):
'\n Subtracts the top two elements.\n \n Subtracts the first element on the stack from the second element and\n pushes the result back onto the stack.\n\n Error Handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of\n the stack is -top\n '
a = self.POP()
b = self.POP()
c = (b - a)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c)<|docstring|>Subtracts the top two elements.
Subtracts the first element on the stack from the second element and
pushes the result back onto the stack.
Error Handling:
If the stack is empty, nothing happens.
If the stack only has a single element, the result pushed to the top of
the stack is -top<|endoftext|> |
9b44dece80f19697c75610b74f1ca1da63ceeb399d303b93b750a640e19159ce | def MULT(self):
'\n Multiplies the top two elements on the stack.\n\n Multiplies the top two elements on the stack and pushes the result back\n onto the stack.\n\n Error handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of \n the stack is 0\n '
a = self.POP()
b = self.POP()
c = (a * b)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c) | Multiplies the top two elements on the stack.
Multiplies the top two elements on the stack and pushes the result back
onto the stack.
Error handling:
If the stack is empty, nothing happens.
If the stack only has a single element, the result pushed to the top of
the stack is 0 | pikapy/stack.py | MULT | DanyGLewin/pykachu | 0 | python | def MULT(self):
'\n Multiplies the top two elements on the stack.\n\n Multiplies the top two elements on the stack and pushes the result back\n onto the stack.\n\n Error handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of \n the stack is 0\n '
a = self.POP()
b = self.POP()
c = (a * b)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c) | def MULT(self):
'\n Multiplies the top two elements on the stack.\n\n Multiplies the top two elements on the stack and pushes the result back\n onto the stack.\n\n Error handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of \n the stack is 0\n '
a = self.POP()
b = self.POP()
c = (a * b)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c)<|docstring|>Multiplies the top two elements on the stack.
Multiplies the top two elements on the stack and pushes the result back
onto the stack.
Error handling:
If the stack is empty, nothing happens.
If the stack only has a single element, the result pushed to the top of
the stack is 0<|endoftext|> |
1ddd14d5e21d52f6e285f68dc8f1547d698066a90650d16ea338fe0af5573c5c | def DIV(self):
'\n Divides the top two elements on the stack\n\n Divides the second element on the stack by the first element on the stack,\n and pushes the result back on top of the stack.\n \n Error Handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of \n the stack is 0\n If the divisor is \'0\', the result pushed to the top of the stack is \n float("NaN")\n '
a = self.POP()
b = self.POP()
if (a == 0):
self.PUSH(float('NaN'))
else:
c = (b // a)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c) | Divides the top two elements on the stack
Divides the second element on the stack by the first element on the stack,
and pushes the result back on top of the stack.
Error Handling:
If the stack is empty, nothing happens.
If the stack only has a single element, the result pushed to the top of
the stack is 0
If the divisor is '0', the result pushed to the top of the stack is
float("NaN") | pikapy/stack.py | DIV | DanyGLewin/pykachu | 0 | python | def DIV(self):
'\n Divides the top two elements on the stack\n\n Divides the second element on the stack by the first element on the stack,\n and pushes the result back on top of the stack.\n \n Error Handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of \n the stack is 0\n If the divisor is \'0\', the result pushed to the top of the stack is \n float("NaN")\n '
a = self.POP()
b = self.POP()
if (a == 0):
self.PUSH(float('NaN'))
else:
c = (b // a)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c) | def DIV(self):
'\n Divides the top two elements on the stack\n\n Divides the second element on the stack by the first element on the stack,\n and pushes the result back on top of the stack.\n \n Error Handling:\n If the stack is empty, nothing happens.\n If the stack only has a single element, the result pushed to the top of \n the stack is 0\n If the divisor is \'0\', the result pushed to the top of the stack is \n float("NaN")\n '
a = self.POP()
b = self.POP()
if (a == 0):
self.PUSH(float('NaN'))
else:
c = (b // a)
self.PUSH(b)
self.PUSH(a)
self.PUSH(c)<|docstring|>Divides the top two elements on the stack
Divides the second element on the stack by the first element on the stack,
and pushes the result back on top of the stack.
Error Handling:
If the stack is empty, nothing happens.
If the stack only has a single element, the result pushed to the top of
the stack is 0
If the divisor is '0', the result pushed to the top of the stack is
float("NaN")<|endoftext|> |
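One behavior of the stack rows above that is easy to miss: the binary operations pop both operands, push them back, and then push the result, so the stack grows by one on each operation; only the division-by-zero branch consumes its operands and leaves NaN on top. A trimmed stand-in (`MiniStack` is not the dataset class, just enough of it to demonstrate both paths):

```python
import math


class MiniStack:
    # trimmed stand-in for PikaStack, reproducing DIV's two branches
    def __init__(self):
        self.elements = []

    def PUSH(self, e):
        self.elements.append(e)

    def POP(self):
        return self.elements.pop() if self.elements else 0

    def DIV(self):
        a, b = self.POP(), self.POP()
        if a == 0:
            self.PUSH(float('NaN'))  # note: b and a are *not* restored here
        else:
            self.PUSH(b)
            self.PUSH(a)
            self.PUSH(b // a)


s = MiniStack()
s.PUSH(7); s.PUSH(2); s.DIV()
print(s.elements)                  # [7, 2, 3] -- operands kept, 7 // 2 on top
s.PUSH(0); s.DIV()
print(math.isnan(s.elements[-1]))  # True
```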
e8d97ae3edda708ffb4ece3661262fd9efb251ac792a1299891bdf389f9dda14 | def RAND(self):
'\n Returns a random number between 1 and the top element on the stack (inclusive).\n\n Error Handling:\n If stack is empty, push 0 to the top of the stack.\n If top of the stack is negative, push 0 to the top of the stack.\n :return:\n '
if (self.PEEK() and (self.PEEK() > 0)):
self.PUSH((randrange(self.PEEK()) + 1))
else:
self.PUSH(0) | Returns a random number between 1 and the top element on the stack (inclusive).
Error Handling:
If stack is empty, push 0 to the top of the stack.
If top of the stack is negative, push 0 to the top of the stack.
:return: | pikapy/stack.py | RAND | DanyGLewin/pykachu | 0 | python | def RAND(self):
'\n Returns a random number between 1 and the top element on the stack (inclusive).\n\n Error Handling:\n If stack is empty, push 0 to the top of the stack.\n If top of the stack is negative, push 0 to the top of the stack.\n :return:\n '
if (self.PEEK() and (self.PEEK() > 0)):
self.PUSH((randrange(self.PEEK()) + 1))
else:
self.PUSH(0) | def RAND(self):
'\n Returns a random number between 1 and the top element on the stack (inclusive).\n\n Error Handling:\n If stack is empty, push 0 to the top of the stack.\n If top of the stack is negative, push 0 to the top of the stack.\n :return:\n '
if (self.PEEK() and (self.PEEK() > 0)):
self.PUSH((randrange(self.PEEK()) + 1))
else:
self.PUSH(0)<|docstring|>Returns a random number between 1 and the top element on the stack (inclusive).
Error Handling:
If stack is empty, push 0 to the top of the stack.
If top of the stack is negative, push 0 to the top of the stack.
:return:<|endoftext|> |
5d1c66cd85ad1b3822527c8b3a9d99791c433c303071dd242db21f23131010ca | def POP(self):
'\n Pops and returns the top element from the stack.\n\n Error Handling:\n If the stack is empty 0 is returned.\n '
if len(self.elements):
return self.elements.pop()
else:
return 0 | Pops and returns the top element from the stack.
Error Handling:
If the stack is empty 0 is returned. | pikapy/stack.py | POP | DanyGLewin/pykachu | 0 | python | def POP(self):
'\n Pops and returns the top element from the stack.\n\n Error Handling:\n If the stack is empty 0 is returned.\n '
if len(self.elements):
return self.elements.pop()
else:
return 0 | def POP(self):
'\n Pops and returns the top element from the stack.\n\n Error Handling:\n If the stack is empty 0 is returned.\n '
if len(self.elements):
return self.elements.pop()
else:
return 0<|docstring|>Pops and returns the top element from the stack.
Error Handling:
If the stack is empty 0 is returned.<|endoftext|> |
57732ba933094a17de5a17fc6aca692d4edffed2fe63102bc0438d340410eb59 | def PUSH(self, element):
'\n Pushes an element to the top of the stack.\n\n Arguments:\n element -> The element to push on the top of the stack.\n '
self.elements.append(element) | Pushes an element to the top of the stack.
Arguments:
element -> The element to push on the top of the stack. | pikapy/stack.py | PUSH | DanyGLewin/pykachu | 0 | python | def PUSH(self, element):
'\n Pushes an element to the top of the stack.\n\n Arguments:\n element -> The element to push on the top of the stack.\n '
self.elements.append(element) | def PUSH(self, element):
'\n Pushes an element to the top of the stack.\n\n Arguments:\n element -> The element to push on the top of the stack.\n '
self.elements.append(element)<|docstring|>Pushes an element to the top of the stack.
Arguments:
element -> The element to push on the top of the stack.<|endoftext|> |
bc8cca12cf75b88f66d12a05a71fe7cda284581a3a4400ae655f28cd74d054ca | def PEEK(self):
'\n Returns the top element from the stack without removing it.\n\n Error Handling:\n If the stack is empty 0 is returned.\n '
if len(self.elements):
return self.elements[(- 1)]
else:
return 0 | Returns the top element from the stack without removing it.
Error Handling:
If the stack is empty 0 is returned. | pikapy/stack.py | PEEK | DanyGLewin/pykachu | 0 | python | def PEEK(self):
'\n Returns the top element from the stack without removing it.\n\n Error Handling:\n If the stack is empty 0 is returned.\n '
if len(self.elements):
return self.elements[(- 1)]
else:
return 0 | def PEEK(self):
'\n Returns the top element from the stack without removing it.\n\n Error Handling:\n If the stack is empty 0 is returned.\n '
if len(self.elements):
return self.elements[(- 1)]
else:
return 0<|docstring|>Returns the top element from the stack without removing it.
Error Handling:
If the stack is empty 0 is returned.<|endoftext|> |
9ad2841e366c992bd8dd5ffc5f88a2d40452e8282386569ab401314b917b4186 | def EMPTY(self):
'\n Returns True if the stack is empty, false otherwise.\n '
return (len(self.elements) == 0) | Returns True if the stack is empty, false otherwise. | pikapy/stack.py | EMPTY | DanyGLewin/pykachu | 0 | python | def EMPTY(self):
'\n \n '
return (len(self.elements) == 0) | def EMPTY(self):
'\n \n '
return (len(self.elements) == 0)<|docstring|>Returns True if the stack is empty, false otherwise.<|endoftext|> |
6591f3908b89a3ccd93bc04c0c35103b3bc7ced51d7ef05f6a21f83d9953ec9b | def __str__(self):
'Defines the string representation of the PikaStack object.'
return str(self.elements) | Defines the string representation of the PikaStack object. | pikapy/stack.py | __str__ | DanyGLewin/pykachu | 0 | python | def __str__(self):
return str(self.elements) | def __str__(self):
return str(self.elements)<|docstring|>Defines the string representation of the PikaStack object.<|endoftext|> |
0ff3a1079dfaf808f564582ced0c9a271e4ded4b889085e438b299b3ea3da061 | def delete(self, request, username):
" The current user is able to unfollow another user's profile. "
follower = request.user.profile
try:
followed = Profile.objects.get(user__username=username)
except Profile.DoesNotExist:
raise NotFound('The user with this profile does not exist')
follower.unfollow(followed)
serializer = self.serializer_class(follower, context={'request': request})
return Response(serializer.data, status=status.HTTP_200_OK) | The current user is able to unfollow another user's profile. | authors/apps/profiles/views.py | delete | Tittoh/blog-API | 1 | python | def delete(self, request, username):
" "
follower = request.user.profile
try:
followed = Profile.objects.get(user__username=username)
except Profile.DoesNotExist:
raise NotFound('The user with this profile does not exist')
follower.unfollow(followed)
serializer = self.serializer_class(follower, context={'request': request})
return Response(serializer.data, status=status.HTTP_200_OK) | def delete(self, request, username):
" "
follower = request.user.profile
try:
followed = Profile.objects.get(user__username=username)
except Profile.DoesNotExist:
raise NotFound('The user with this profile does not exist')
follower.unfollow(followed)
serializer = self.serializer_class(follower, context={'request': request})
return Response(serializer.data, status=status.HTTP_200_OK)<|docstring|>The current user is able to unfollow another user's profile.<|endoftext|> |
863be5c0eec284eab9d2c4138b1509c7aacc0fc5f9df5fb88f88b9f407b5090a | def post(self, request, username):
" The current user is able to follow another user's profile. "
follower = request.user.profile
try:
followed = Profile.objects.get(user__username=username)
except Profile.DoesNotExist:
raise NotFound('The user with this profile does not exist')
if (follower.pk is followed.pk):
raise serializers.ValidationError('You cannot follow yourself')
follower.follow(followed)
serializer = self.serializer_class(follower, context={'request': request})
return Response(serializer.data, status=status.HTTP_200_OK) | The current user is able to follow another user's profile. | authors/apps/profiles/views.py | post | Tittoh/blog-API | 1 | python | def post(self, request, username):
" "
follower = request.user.profile
try:
followed = Profile.objects.get(user__username=username)
except Profile.DoesNotExist:
raise NotFound('The user with this profile does not exist')
if (follower.pk is followed.pk):
raise serializers.ValidationError('You cannot follow yourself')
follower.follow(followed)
serializer = self.serializer_class(follower, context={'request': request})
return Response(serializer.data, status=status.HTTP_200_OK) | def post(self, request, username):
" "
follower = request.user.profile
try:
followed = Profile.objects.get(user__username=username)
except Profile.DoesNotExist:
raise NotFound('The user with this profile does not exist')
if (follower.pk is followed.pk):
raise serializers.ValidationError('You cannot follow yourself')
follower.follow(followed)
serializer = self.serializer_class(follower, context={'request': request})
return Response(serializer.data, status=status.HTTP_200_OK)<|docstring|>The current user is able to follow another user's profile.<|endoftext|> |
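The two view rows above lean on `Profile.follow`, `Profile.unfollow`, and an implied relation between profiles, none of which appear in this dump. A hedged sketch of what those model helpers might look like, assuming a non-symmetrical ManyToMany relation (the field name `follows` is an invention for illustration, not confirmed by the source):

```python
from django.db import models


class Profile(models.Model):
    user = models.OneToOneField('auth.User', on_delete=models.CASCADE)
    # assumed relation: who this profile follows (not mutual by default)
    follows = models.ManyToManyField(
        'self', related_name='followed_by', symmetrical=False)

    def follow(self, profile):
        """Start following `profile`; adding twice is a no-op."""
        self.follows.add(profile)

    def unfollow(self, profile):
        """Stop following `profile`; removing a non-followee is a no-op."""
        self.follows.remove(profile)
```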
ff94a347a80e8333bab3d8b53b619e850f02d708f82d3de48c962217446351e2 | def __init__(self, ical, account, href=None, local_tz=None, default_tz=None, start=None, end=None, color=None, readonly=False, unicode_symbols=True, etag=None):
"\n :param ical: the icalendar VEVENT this event is based on\n :type ical: str or icalendar.cal.EVent\n :param account: the account/calendar this event belongs to\n :type account: str\n :param href: the href of the event, treated like a UID\n :type href: str\n :param local_tz: the local timezone the user wants event's times\n displayed in\n :type local_tz: datetime.tzinfo\n :param default_tz: the timezone used if the start and end time\n of the event have no timezone information\n (or none that icalendar understands)\n :type default_tz: datetime.tzinfo\n :param start: start date[time] of this event, this will override the\n start date from the vevent. This is useful for recurring\n events, since we only save the original event once and\n that original events start and end times might not be\n *this* event's start and end time.\n :type start: datetime.date or datetime.datetime\n :param end: see :param start:\n :type end: datetime.date or datetime.datetime\n :param color: the color this event should be shown in ikhal and khal,\n Supported color names are :\n black, white, brown, yellow, dark grey, dark green,\n dark blue, light grey, light green, light blue,\n dark magenta, dark cyan, dark red, light magenta,\n light cyan, light red\n :type color: str\n :param readonly: flag to show if this event may be modified or not\n :type readonly: bool\n :param unicode_symbols: some terminal fonts to not support fancey\n unicode symbols, if set to False pure ascii\n alternatives will be shown\n :type unicode_symbols: bool\n :param etag: the event's etag, will not be modified\n :type etag: str\n "
if isinstance(ical, unicode_type):
self.vevent = icalendar.Event.from_ical(ical)
elif isinstance(ical, bytes_type):
self.vevent = icalendar.Event.from_ical(ical.decode('utf-8'))
elif isinstance(ical, icalendar.cal.Event):
self.vevent = ical
else:
raise ValueError
self.allday = True
self.color = color
if (href is None):
uid = self.vevent['UID']
href = (uid + '.ics')
self.account = account
self.readonly = readonly
self.unicode_symbols = unicode_symbols
self.etag = etag
self.href = href
if unicode_symbols:
self.recurstr = u' ⟳'
self.rangestr = u'↔ '
self.rangestopstr = u'⇥ '
self.rangestartstr = u'↦ '
else:
self.recurstr = u' R'
self.rangestr = u' <->'
self.rangestopstr = u' ->|'
self.rangestartstr = u' |->'
if (start is not None):
if isinstance(self.vevent['dtstart'].dt, datetime.datetime):
self.allday = False
start = start.astimezone(local_tz)
end = end.astimezone(local_tz)
self.vevent['DTSTART'].dt = start
if (start is not None):
if ('DTEND' in self.vevent.keys()):
self.vevent['DTEND'].dt = end
self.local_tz = local_tz
self.default_tz = default_tz | :param ical: the icalendar VEVENT this event is based on
:type ical: str or icalendar.cal.EVent
:param account: the account/calendar this event belongs to
:type account: str
:param href: the href of the event, treated like a UID
:type href: str
:param local_tz: the local timezone the user wants event's times
displayed in
:type local_tz: datetime.tzinfo
:param default_tz: the timezone used if the start and end time
of the event have no timezone information
(or none that icalendar understands)
:type default_tz: datetime.tzinfo
:param start: start date[time] of this event, this will override the
start date from the vevent. This is useful for recurring
events, since we only save the original event once and
that original events start and end times might not be
*this* event's start and end time.
:type start: datetime.date or datetime.datetime
:param end: see :param start:
:type end: datetime.date or datetime.datetime
:param color: the color this event should be shown in ikhal and khal,
Supported color names are :
black, white, brown, yellow, dark grey, dark green,
dark blue, light grey, light green, light blue,
dark magenta, dark cyan, dark red, light magenta,
light cyan, light red
:type color: str
:param readonly: flag to show if this event may be modified or not
:type readonly: bool
:param unicode_symbols: some terminal fonts to not support fancey
unicode symbols, if set to False pure ascii
alternatives will be shown
:type unicode_symbols: bool
:param etag: the event's etag, will not be modified
:type etag: str | khal/khalendar/event.py | __init__ | untitaker/khal | 0 | python | def __init__(self, ical, account, href=None, local_tz=None, default_tz=None, start=None, end=None, color=None, readonly=False, unicode_symbols=True, etag=None):
"\n :param ical: the icalendar VEVENT this event is based on\n :type ical: str or icalendar.cal.EVent\n :param account: the account/calendar this event belongs to\n :type account: str\n :param href: the href of the event, treated like a UID\n :type href: str\n :param local_tz: the local timezone the user wants event's times\n displayed in\n :type local_tz: datetime.tzinfo\n :param default_tz: the timezone used if the start and end time\n of the event have no timezone information\n (or none that icalendar understands)\n :type default_tz: datetime.tzinfo\n :param start: start date[time] of this event, this will override the\n start date from the vevent. This is useful for recurring\n events, since we only save the original event once and\n that original events start and end times might not be\n *this* event's start and end time.\n :type start: datetime.date or datetime.datetime\n :param end: see :param start:\n :type end: datetime.date or datetime.datetime\n :param color: the color this event should be shown in ikhal and khal,\n Supported color names are :\n black, white, brown, yellow, dark grey, dark green,\n dark blue, light grey, light green, light blue,\n dark magenta, dark cyan, dark red, light magenta,\n light cyan, light red\n :type color: str\n :param readonly: flag to show if this event may be modified or not\n :type readonly: bool\n :param unicode_symbols: some terminal fonts to not support fancey\n unicode symbols, if set to False pure ascii\n alternatives will be shown\n :type unicode_symbols: bool\n :param etag: the event's etag, will not be modified\n :type etag: str\n "
if isinstance(ical, unicode_type):
self.vevent = icalendar.Event.from_ical(ical)
elif isinstance(ical, bytes_type):
self.vevent = icalendar.Event.from_ical(ical.decode('utf-8'))
elif isinstance(ical, icalendar.cal.Event):
self.vevent = ical
else:
raise ValueError
self.allday = True
self.color = color
if (href is None):
uid = self.vevent['UID']
href = (uid + '.ics')
self.account = account
self.readonly = readonly
self.unicode_symbols = unicode_symbols
self.etag = etag
self.href = href
if unicode_symbols:
self.recurstr = u' ⟳'
self.rangestr = u'↔ '
self.rangestopstr = u'⇥ '
self.rangestartstr = u'↦ '
else:
self.recurstr = u' R'
self.rangestr = u' <->'
self.rangestopstr = u' ->|'
self.rangestartstr = u' |->'
if (start is not None):
if isinstance(self.vevent['dtstart'].dt, datetime.datetime):
self.allday = False
start = start.astimezone(local_tz)
end = end.astimezone(local_tz)
self.vevent['DTSTART'].dt = start
if (start is not None):
if ('DTEND' in self.vevent.keys()):
self.vevent['DTEND'].dt = end
self.local_tz = local_tz
self.default_tz = default_tz | def __init__(self, ical, account, href=None, local_tz=None, default_tz=None, start=None, end=None, color=None, readonly=False, unicode_symbols=True, etag=None):
"\n :param ical: the icalendar VEVENT this event is based on\n :type ical: str or icalendar.cal.EVent\n :param account: the account/calendar this event belongs to\n :type account: str\n :param href: the href of the event, treated like a UID\n :type href: str\n :param local_tz: the local timezone the user wants event's times\n displayed in\n :type local_tz: datetime.tzinfo\n :param default_tz: the timezone used if the start and end time\n of the event have no timezone information\n (or none that icalendar understands)\n :type default_tz: datetime.tzinfo\n :param start: start date[time] of this event, this will override the\n start date from the vevent. This is useful for recurring\n events, since we only save the original event once and\n that original events start and end times might not be\n *this* event's start and end time.\n :type start: datetime.date or datetime.datetime\n :param end: see :param start:\n :type end: datetime.date or datetime.datetime\n :param color: the color this event should be shown in ikhal and khal,\n Supported color names are :\n black, white, brown, yellow, dark grey, dark green,\n dark blue, light grey, light green, light blue,\n dark magenta, dark cyan, dark red, light magenta,\n light cyan, light red\n :type color: str\n :param readonly: flag to show if this event may be modified or not\n :type readonly: bool\n :param unicode_symbols: some terminal fonts to not support fancey\n unicode symbols, if set to False pure ascii\n alternatives will be shown\n :type unicode_symbols: bool\n :param etag: the event's etag, will not be modified\n :type etag: str\n "
if isinstance(ical, unicode_type):
self.vevent = icalendar.Event.from_ical(ical)
elif isinstance(ical, bytes_type):
self.vevent = icalendar.Event.from_ical(ical.decode('utf-8'))
elif isinstance(ical, icalendar.cal.Event):
self.vevent = ical
else:
raise ValueError
self.allday = True
self.color = color
if (href is None):
uid = self.vevent['UID']
href = (uid + '.ics')
self.account = account
self.readonly = readonly
self.unicode_symbols = unicode_symbols
self.etag = etag
self.href = href
if unicode_symbols:
self.recurstr = u' ⟳'
self.rangestr = u'↔ '
self.rangestopstr = u'⇥ '
self.rangestartstr = u'↦ '
else:
self.recurstr = u' R'
self.rangestr = u' <->'
self.rangestopstr = u' ->|'
self.rangestartstr = u' |->'
if (start is not None):
if isinstance(self.vevent['dtstart'].dt, datetime.datetime):
self.allday = False
start = start.astimezone(local_tz)
end = end.astimezone(local_tz)
self.vevent['DTSTART'].dt = start
if (start is not None):
if ('DTEND' in self.vevent.keys()):
self.vevent['DTEND'].dt = end
self.local_tz = local_tz
self.default_tz = default_tz<|docstring|>:param ical: the icalendar VEVENT this event is based on
:type ical: str or icalendar.cal.EVent
:param account: the account/calendar this event belongs to
:type account: str
:param href: the href of the event, treated like a UID
:type href: str
:param local_tz: the local timezone the user wants event's times
displayed in
:type local_tz: datetime.tzinfo
:param default_tz: the timezone used if the start and end time
of the event have no timezone information
(or none that icalendar understands)
:type default_tz: datetime.tzinfo
:param start: start date[time] of this event, this will override the
start date from the vevent. This is useful for recurring
events, since we only save the original event once and
that original events start and end times might not be
*this* event's start and end time.
:type start: datetime.date or datetime.datetime
:param end: see :param start:
:type end: datetime.date or datetime.datetime
:param color: the color this event should be shown in ikhal and khal,
Supported color names are :
black, white, brown, yellow, dark grey, dark green,
dark blue, light grey, light green, light blue,
dark magenta, dark cyan, dark red, light magenta,
light cyan, light red
:type color: str
:param readonly: flag to show if this event may be modified or not
:type readonly: bool
:param unicode_symbols: some terminal fonts to not support fancey
unicode symbols, if set to False pure ascii
alternatives will be shown
:type unicode_symbols: bool
:param etag: the event's etag, will not be modified
:type etag: str<|endoftext|> |
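The interesting branch in the constructor above is the start/end override for recurrence instances: it shifts the supplied datetimes into the local timezone and writes them back into the parsed VEVENT. A sketch of just that step, assuming `icalendar` and `pytz` are installed (the VEVENT string is made up for the demo):

```python
import icalendar
import pytz

ical = ('BEGIN:VEVENT\r\nUID:demo-1\r\n'
        'DTSTART:20140101T100000Z\r\nDTEND:20140101T110000Z\r\nEND:VEVENT\r\n')
vevent = icalendar.Event.from_ical(ical)
local_tz = pytz.timezone('Europe/Berlin')

# mirror the constructor: move this instance's start into local time
start = vevent['DTSTART'].dt.astimezone(local_tz)
vevent['DTSTART'].dt = start
print(start)  # 2014-01-01 11:00:00+01:00
```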
5bb2ed7564bb67813a63ccd7df940889dc0740a3ded23d85063249ac5e165a8d | def _compact_datetime(self, day, timeformat='%M:%H'):
'compact description of this event\n\n TODO: explain day param\n\n :param day:\n :type day: datetime.date\n\n :return: compact description of Event\n :rtype: unicode()\n '
start = datetime.datetime.combine(day, datetime.time.min)
end = datetime.datetime.combine(day, datetime.time.max)
local_start = self.local_tz.localize(start)
local_end = self.local_tz.localize(end)
if ('RRULE' in self.vevent.keys()):
recurstr = self.recurstr
else:
recurstr = ''
tostr = '-'
if (self.start < local_start):
startstr = u'→ '
tostr = ''
else:
startstr = self.start.strftime(timeformat)
if (self.end > local_end):
endstr = u' → '
tostr = ''
else:
endstr = self.end.strftime(timeformat)
return (((((startstr + tostr) + endstr) + ': ') + self.summary) + recurstr) | compact description of this event
TODO: explain day param
:param day:
:type day: datetime.date
:return: compact description of Event
:rtype: unicode() | khal/khalendar/event.py | _compact_datetime | untitaker/khal | 0 | python | def _compact_datetime(self, day, timeformat='%M:%H'):
'compact description of this event\n\n TODO: explain day param\n\n :param day:\n :type day: datetime.date\n\n :return: compact description of Event\n :rtype: unicode()\n '
start = datetime.datetime.combine(day, datetime.time.min)
end = datetime.datetime.combine(day, datetime.time.max)
local_start = self.local_tz.localize(start)
local_end = self.local_tz.localize(end)
if ('RRULE' in self.vevent.keys()):
recurstr = self.recurstr
else:
recurstr =
tostr = '-'
if (self.start < local_start):
startstr = u'→ '
tostr =
else:
startstr = self.start.strftime(timeformat)
if (self.end > local_end):
endstr = u' → '
tostr =
else:
endstr = self.end.strftime(timeformat)
return (((((startstr + tostr) + endstr) + ': ') + self.summary) + recurstr) | def _compact_datetime(self, day, timeformat='%M:%H'):
'compact description of this event\n\n TODO: explain day param\n\n :param day:\n :type day: datetime.date\n\n :return: compact description of Event\n :rtype: unicode()\n '
start = datetime.datetime.combine(day, datetime.time.min)
end = datetime.datetime.combine(day, datetime.time.max)
local_start = self.local_tz.localize(start)
local_end = self.local_tz.localize(end)
if ('RRULE' in self.vevent.keys()):
recurstr = self.recurstr
else:
recurstr =
tostr = '-'
if (self.start < local_start):
startstr = u'→ '
tostr =
else:
startstr = self.start.strftime(timeformat)
if (self.end > local_end):
endstr = u' → '
tostr =
else:
endstr = self.end.strftime(timeformat)
return (((((startstr + tostr) + endstr) + ': ') + self.summary) + recurstr)<|docstring|>compact description of this event
TODO: explain day param
:param day:
:type day: datetime.date
:return: compact description of Event
:rtype: unicode()<|endoftext|> |
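One detail worth flagging in `_compact_datetime` above: the default `timeformat='%M:%H'` renders minutes before hours, so 09:30 prints as 30:09. Whether that is intended or a transposition of `'%H:%M'` cannot be settled from this dump alone, but the difference is easy to check:

```python
import datetime

t = datetime.datetime(2014, 1, 1, 9, 30)
print(t.strftime('%M:%H'))  # 30:09  (the default above)
print(t.strftime('%H:%M'))  # 09:30  (the conventional ordering)
```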
bb4ed6c1c0a6a9fa1b28fe73e910921ab22836ae8c2f65f8c05e3558b79fcdff | def _create_calendar(self):
'\n create the calendar\n\n :returns: calendar\n :rtype: icalendar.Calendar()\n '
calendar = icalendar.Calendar()
calendar.add('version', '2.0')
calendar.add('prodid', '-//CALENDARSERVER.ORG//NONSGML Version 1//EN')
return calendar | create the calendar
:returns: calendar
:rtype: icalendar.Calendar() | khal/khalendar/event.py | _create_calendar | untitaker/khal | 0 | python | def _create_calendar(self):
'\n create the calendar\n\n :returns: calendar\n :rtype: icalendar.Calendar()\n '
calendar = icalendar.Calendar()
calendar.add('version', '2.0')
calendar.add('prodid', '-//CALENDARSERVER.ORG//NONSGML Version 1//EN')
return calendar | def _create_calendar(self):
'\n create the calendar\n\n :returns: calendar\n :rtype: icalendar.Calendar()\n '
calendar = icalendar.Calendar()
calendar.add('version', '2.0')
calendar.add('prodid', '-//CALENDARSERVER.ORG//NONSGML Version 1//EN')
return calendar<|docstring|>create the calendar
:returns: calendar
:rtype: icalendar.Calendar()<|endoftext|> |
f439fd0cbc66beb320817b3697861d2932cc06be45ef42f87122a676955e041e | def _create_timezone(self, tz):
'\n create an icalendar timezone from a pytz.tzinfo\n\n :param tz: the timezone\n :type tz: pytz.tzinfo\n :returns: timezone information set\n :rtype: icalendar.Timezone()\n '
timezone = icalendar.Timezone()
timezone.add('TZID', tz)
this_year = datetime.datetime.today().year
(daylight, standard) = [(num, dt) for (num, dt) in enumerate(tz._utc_transition_times) if (dt.year == this_year)]
timezone_daylight = icalendar.TimezoneDaylight()
timezone_daylight.add('TZNAME', tz._transition_info[daylight[0]][2])
timezone_daylight.add('DTSTART', daylight[1])
timezone_daylight.add('TZOFFSETFROM', tz._transition_info[daylight[0]][0])
timezone_daylight.add('TZOFFSETTO', tz._transition_info[standard[0]][0])
timezone_standard = icalendar.TimezoneStandard()
timezone_standard.add('TZNAME', tz._transition_info[standard[0]][2])
timezone_standard.add('DTSTART', standard[1])
timezone_standard.add('TZOFFSETFROM', tz._transition_info[standard[0]][0])
timezone_standard.add('TZOFFSETTO', tz._transition_info[daylight[0]][0])
timezone.add_component(timezone_daylight)
timezone.add_component(timezone_standard)
return timezone | create an icalendar timezone from a pytz.tzinfo
:param tz: the timezone
:type tz: pytz.tzinfo
:returns: timezone information set
:rtype: icalendar.Timezone() | khal/khalendar/event.py | _create_timezone | untitaker/khal | 0 | python | def _create_timezone(self, tz):
'\n create an icalendar timezone from a pytz.tzinfo\n\n :param tz: the timezone\n :type tz: pytz.tzinfo\n :returns: timezone information set\n :rtype: icalendar.Timezone()\n '
timezone = icalendar.Timezone()
timezone.add('TZID', tz)
this_year = datetime.datetime.today().year
(daylight, standard) = [(num, dt) for (num, dt) in enumerate(tz._utc_transition_times) if (dt.year == this_year)]
timezone_daylight = icalendar.TimezoneDaylight()
timezone_daylight.add('TZNAME', tz._transition_info[daylight[0]][2])
timezone_daylight.add('DTSTART', daylight[1])
timezone_daylight.add('TZOFFSETFROM', tz._transition_info[daylight[0]][0])
timezone_daylight.add('TZOFFSETTO', tz._transition_info[standard[0]][0])
timezone_standard = icalendar.TimezoneStandard()
timezone_standard.add('TZNAME', tz._transition_info[standard[0]][2])
timezone_standard.add('DTSTART', standard[1])
timezone_standard.add('TZOFFSETFROM', tz._transition_info[standard[0]][0])
timezone_standard.add('TZOFFSETTO', tz._transition_info[daylight[0]][0])
timezone.add_component(timezone_daylight)
timezone.add_component(timezone_standard)
return timezone | def _create_timezone(self, tz):
'\n create an icalendar timezone from a pytz.tzinfo\n\n :param tz: the timezone\n :type tz: pytz.tzinfo\n :returns: timezone information set\n :rtype: icalendar.Timezone()\n '
timezone = icalendar.Timezone()
timezone.add('TZID', tz)
this_year = datetime.datetime.today().year
(daylight, standard) = [(num, dt) for (num, dt) in enumerate(tz._utc_transition_times) if (dt.year == this_year)]
timezone_daylight = icalendar.TimezoneDaylight()
timezone_daylight.add('TZNAME', tz._transition_info[daylight[0]][2])
timezone_daylight.add('DTSTART', daylight[1])
timezone_daylight.add('TZOFFSETFROM', tz._transition_info[daylight[0]][0])
timezone_daylight.add('TZOFFSETTO', tz._transition_info[standard[0]][0])
timezone_standard = icalendar.TimezoneStandard()
timezone_standard.add('TZNAME', tz._transition_info[standard[0]][2])
timezone_standard.add('DTSTART', standard[1])
timezone_standard.add('TZOFFSETFROM', tz._transition_info[standard[0]][0])
timezone_standard.add('TZOFFSETTO', tz._transition_info[daylight[0]][0])
timezone.add_component(timezone_daylight)
timezone.add_component(timezone_standard)
return timezone<|docstring|>create an icalendar timezone from a pytz.tzinfo
:param tz: the timezone
:type tz: pytz.tzinfo
:returns: timezone information set
:rtype: icalendar.Timezone()<|endoftext|> |
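A small interactive sketch of the pytz internals the method above leans on (the zone name is an arbitrary choice; exactly two transitions per year are assumed, just as in the code, and the first transition being the daylight one only holds in the northern hemisphere):
import datetime
import pytz

tz = pytz.timezone('Europe/Berlin')
this_year = datetime.datetime.today().year
transitions = [(num, dt) for num, dt in enumerate(tz._utc_transition_times)
               if dt.year == this_year]
daylight, standard = transitions  # assumes exactly two transitions this year
utcoffset, dst, tzname = tz._transition_info[daylight[0]]
print(tzname, utcoffset)  # e.g. CEST 2:00:00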
b2509600de27be080d637463cadd9b8b34776d110ad1a87ab45cd59b7edafc28 | def on_freeze(self):
'\n        What happens if the position does not change for 5 minutes\n        '
self.client.freeze()
self.status = 'frozen'
self.status_sign.emit('frozen')
self.freezetimer.cancel()
self.freezetimer = Timer(300, self.on_freeze)
self.freezetimer.start() | What happens if the position does not change for 5 minutes
'\n \n '
self.client.freeze()
self.status = 'frozen'
self.status_sign.emit('frozen')
self.freezetimer.cancel()
self.freezetimer = Timer(300, self.on_freeze)
self.freezetimer.start() | def on_freeze(self):
'\n \n '
self.client.freeze()
self.status = 'frozen'
self.status_sign.emit('frozen')
self.freezetimer.cancel()
self.freezetimer = Timer(300, self.on_freeze)
self.freezetimer.start()<|docstring|>What happens if the position does not change for 5 minutes<|endoftext|>
b8bd53b8538f2cd9c6bde9139b82e7d9e4844eec168f85daa16983f733093e56 | def stop(self):
'\n Prevents timer thread from running after closing the gui\n '
self.freezetimer.cancel()
self.freezetimer = Timer(300, self.on_freeze)
self.status = 'stopped' | Prevents timer thread from running after closing the gui | spaghettiqueue/logparser.py | stop | giorgioshine/SpaghettiQueue | 9 | python | def stop(self):
'\n \n '
self.freezetimer.cancel()
self.freezetimer = Timer(300, self.on_freeze)
self.status = 'stopped' | def stop(self):
'\n \n '
self.freezetimer.cancel()
self.freezetimer = Timer(300, self.on_freeze)
self.status = 'stopped'<|docstring|>Prevents timer thread from running after closing the gui<|endoftext|> |
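The two methods above form a re-arming watchdog; here is a self-contained sketch of the same threading.Timer pattern (class and method names are illustrative, not taken from the repository):
from threading import Timer

class FreezeWatchdog:
    def __init__(self, timeout=300):
        self.timeout = timeout
        self.timer = Timer(self.timeout, self.on_freeze)
        self.timer.start()

    def on_freeze(self):
        # fires after `timeout` seconds with no reset(); re-arms itself
        print('frozen: no position change for %d seconds' % self.timeout)
        self.timer = Timer(self.timeout, self.on_freeze)
        self.timer.start()

    def reset(self):
        # call on every position update to push the deadline back
        self.timer.cancel()
        self.timer = Timer(self.timeout, self.on_freeze)
        self.timer.start()

    def stop(self):
        # cancel before shutdown so no timer thread outlives the GUI
        self.timer.cancel()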
2bbb9909085aa9fb208afe5eff7d37888aac7c8880511f8f782e2a5316558e5e | def get_args():
' Method to get all commandline arguments for training '
parser = argparse.ArgumentParser(description='PyTorch Higgs Training')
parser.add_argument('--epochs', default=20, type=int, metavar='N', help='Total number of epochs to run')
parser.add_argument('--batch_size', default=128, type=int, metavar='N', help='training batch size')
parser.add_argument('--percent_unlabeled', type=float, default=1.0, help='Fraction of the data to leave unlabeled')
parser.add_argument('--learning_rate', type=float, default=0.01, help='Learning rate for neural nets')
parser.add_argument('--weight_decay', type=float, default=0.01, help='Weight decay (L2 penalty) for the optimizer')
parser.add_argument('--env', type=int, help='Number of different environments')
parser.add_argument('--val_iteration', type=int, default=1024, help='Number of labeled data')
args = parser.parse_args()
return args | Method to get all commandline arguments for training | args_util.py | get_args | cspradli/HiggsID | 0 | python | def get_args():
' '
parser = argparse.ArgumentParser(description='PyTorch Higgs Training')
parser.add_argument('--epochs', default=20, type=int, metavar='N', help='Total number of epochs to run')
parser.add_argument('--batch_size', default=128, type=int, metavar='N', help='training batch size')
parser.add_argument('--percent_unlabeled', type=float, default=1.0, help='Fraction of the data to leave unlabeled')
parser.add_argument('--learning_rate', type=float, default=0.01, help='Learning rate for neural nets')
parser.add_argument('--weight_decay', type=float, default=0.01, help='Weight decay (L2 penalty) for the optimizer')
parser.add_argument('--env', type=int, help='Number of different environments')
parser.add_argument('--val_iteration', type=int, default=1024, help='Number of labeled data')
args = parser.parse_args()
return args | def get_args():
' '
parser = argparse.ArgumentParser(description='PyTorch Higgs Training')
parser.add_argument('--epochs', default=20, type=int, metavar='N', help='Total number of epochs to run')
parser.add_argument('--batch_size', default=128, type=int, metavar='N', help='training batch size')
parser.add_argument('--percent_unlabeled', type=float, default=1.0, help='Fraction of the data to leave unlabeled')
parser.add_argument('--learning_rate', type=float, default=0.01, help='Learning rate for neural nets')
parser.add_argument('--weight_decay', type=float, default=0.01, help='Weight decay (L2 penalty) for the optimizer')
parser.add_argument('--env', type=int, help='Number of different environments')
parser.add_argument('--val_iteration', type=int, default=1024, help='Number of labeled data')
args = parser.parse_args()
return args<|docstring|>Method to get all commandline arguments for training<|endoftext|> |
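A hedged usage sketch for get_args, assuming it is importable from the module above; the synthetic argv values are illustrative:
import sys

sys.argv = ['train.py', '--epochs', '5', '--batch_size', '64', '--env', '2']
args = get_args()
print(args.epochs, args.batch_size, args.learning_rate)  # 5 64 0.01 (defaults fill the rest)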
8700efabc25faaee61d6ef74ec2acd486aec151335128fba7465fb05b52e2656 | @mock_http_response(responses.GET, '/sapi/v1/bswap/liquidityOps', mock_item, 200)
def test_bswap_liquidity():
'Tests the API endpoint to get liquidity operation (add/remove) records.'
client = Client(key, secret)
response = client.bswap_liquidity_operation_record()
response.should.equal(mock_item) | Tests the API endpoint to get liquidity operation (add/remove) records. | tests/spot/bswap/test_bswap_liquidity_operation_record.py | test_bswap_liquidity | SenthilVikram/binance-connector-python | 512 | python | @mock_http_response(responses.GET, '/sapi/v1/bswap/liquidityOps', mock_item, 200)
def test_bswap_liquidity():
client = Client(key, secret)
response = client.bswap_liquidity_operation_record()
response.should.equal(mock_item) | @mock_http_response(responses.GET, '/sapi/v1/bswap/liquidityOps', mock_item, 200)
def test_bswap_liquidity():
client = Client(key, secret)
response = client.bswap_liquidity_operation_record()
response.should.equal(mock_item)<|docstring|>Tests the API endpoint to get liquidity operation (add/remove) records.<|endoftext|> |
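mock_http_response is a project-local helper built on the responses library; a stripped-down sketch of the same mocking idea with plain requests (the host URL and payload are assumptions, not taken from the test suite):
import requests
import responses

@responses.activate
def liquidity_mock_sketch():
    responses.add(responses.GET,
                  'https://api.binance.com/sapi/v1/bswap/liquidityOps',
                  json=[{'operationId': 1}], status=200)
    r = requests.get('https://api.binance.com/sapi/v1/bswap/liquidityOps')
    assert r.json() == [{'operationId': 1}]

liquidity_mock_sketch()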
9e53ffcd2fe5631938da38ad9246f8ab047eee5e94a812d011cac288325c56c8 | def __init__(self, xml_file=None):
'\n Given a well formed XML file (xml_file), read it and turn it into\n a big string.\n '
self.__name = ''
self.__namespace = None
self.__include_header_files = []
self.__includes = []
self.__include_enum_files = []
self.__include_array_files = []
self.__comment = None
self.__format = None
self.__type_id = None
self.__string_size = None
self.__type = None
self.__size = None
self.__default = []
self.__xml_filename = xml_file
self.Config = ConfigManager.ConfigManager.getInstance()
typeslist = ['U8', 'I8', 'BYTE', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'F32', 'F64', 'bool', 'ENUM', 'string']
if (not os.path.isfile(xml_file)):
stri = ('ERROR: Could not find specified XML file %s.' % xml_file)
raise IOError(stri)
fd = open(xml_file, 'r')
xml_file = os.path.basename(xml_file)
self.__xml_filename = xml_file
xml_parser = etree.XMLParser(remove_comments=True)
element_tree = etree.parse(fd, parser=xml_parser)
fd.close()
relax_file_handler = open((ROOTDIR + self.Config.get('schema', 'array')), 'r')
relax_parsed = etree.parse(relax_file_handler)
relax_file_handler.close()
relax_compiled = etree.RelaxNG(relax_parsed)
if (not relax_compiled.validate(element_tree)):
raise FprimeRngXmlValidationException(relax_compiled.error_log)
self.validate_xml(xml_file, element_tree, 'schematron', 'array_default')
array = element_tree.getroot()
if (array.tag != 'array'):
PRINT.info(('%s is not an array definition file' % xml_file))
sys.exit((- 1))
print(('Parsing Array %s' % array.attrib['name']))
self.__name = array.attrib['name']
if ('namespace' in array.attrib):
self.__namespace = array.attrib['namespace']
for array_tag in array:
if (array_tag.tag == 'format'):
self.__format = array_tag.text
elif (array_tag.tag == 'type'):
self.__type = array_tag.text
if (not (self.__type in typeslist)):
self.__typeinfo = 'extern'
else:
self.__typeinfo = 'basic'
if ('size' in array_tag.attrib):
self.__string_size = array_tag.attrib['size']
elif (array_tag.tag == 'typeid'):
self.__type_id = array_tag.text
elif (array_tag.tag == 'size'):
self.__size = array_tag.text
elif (array_tag.tag == 'default'):
for value_tag in array_tag:
self.__default.append(value_tag.text)
elif (array_tag.tag == 'comment'):
self.__comment = array_tag.text
elif (array_tag.tag == 'include_header'):
self.__include_header_files.append(array_tag.text)
elif (array_tag.tag == 'import_serializable_type'):
self.__includes.append(array_tag.text)
elif (array_tag.tag == 'import_enum_type'):
self.__include_enum_files.append(array_tag.text)
elif (array_tag.tag == 'import_array_type'):
self.__include_array_files.append(array_tag.text)
if (not ('typeid' in array.attrib)):
s = etree.tostring(element_tree.getroot())
h = hashlib.sha256(s)
n = h.hexdigest()
self.__type_id = ('0x' + n.upper()[(- 8):])
core = os.environ['BUILD_ROOT']
curdir = os.getcwd()
curdir = curdir.replace(core, '')
self.__include_path = curdir | Given a well formed XML file (xml_file), read it and turn it into
a big string. | Autocoders/Python/src/fprime_ac/parsers/XmlArrayParser.py | __init__ | shubham-shahh/fprime | 9,182 | python | def __init__(self, xml_file=None):
'\n Given a well formed XML file (xml_file), read it and turn it into\n a big string.\n '
self.__name = ''
self.__namespace = None
self.__include_header_files = []
self.__includes = []
self.__include_enum_files = []
self.__include_array_files = []
self.__comment = None
self.__format = None
self.__type_id = None
self.__string_size = None
self.__type = None
self.__size = None
self.__default = []
self.__xml_filename = xml_file
self.Config = ConfigManager.ConfigManager.getInstance()
typeslist = ['U8', 'I8', 'BYTE', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'F32', 'F64', 'bool', 'ENUM', 'string']
if (not os.path.isfile(xml_file)):
stri = ('ERROR: Could not find specified XML file %s.' % xml_file)
raise IOError(stri)
fd = open(xml_file, 'r')
xml_file = os.path.basename(xml_file)
self.__xml_filename = xml_file
xml_parser = etree.XMLParser(remove_comments=True)
element_tree = etree.parse(fd, parser=xml_parser)
fd.close()
relax_file_handler = open((ROOTDIR + self.Config.get('schema', 'array')), 'r')
relax_parsed = etree.parse(relax_file_handler)
relax_file_handler.close()
relax_compiled = etree.RelaxNG(relax_parsed)
if (not relax_compiled.validate(element_tree)):
raise FprimeRngXmlValidationException(relax_compiled.error_log)
self.validate_xml(xml_file, element_tree, 'schematron', 'array_default')
array = element_tree.getroot()
if (array.tag != 'array'):
PRINT.info(('%s is not an array definition file' % xml_file))
sys.exit((- 1))
print(('Parsing Array %s' % array.attrib['name']))
self.__name = array.attrib['name']
if ('namespace' in array.attrib):
self.__namespace = array.attrib['namespace']
for array_tag in array:
if (array_tag.tag == 'format'):
self.__format = array_tag.text
elif (array_tag.tag == 'type'):
self.__type = array_tag.text
if (not (self.__type in typeslist)):
self.__typeinfo = 'extern'
else:
self.__typeinfo = 'basic'
if ('size' in array_tag.attrib):
self.__string_size = array_tag.attrib['size']
elif (array_tag.tag == 'typeid'):
self.__type_id = array_tag.text
elif (array_tag.tag == 'size'):
self.__size = array_tag.text
elif (array_tag.tag == 'default'):
for value_tag in array_tag:
self.__default.append(value_tag.text)
elif (array_tag.tag == 'comment'):
self.__comment = array_tag.text
elif (array_tag.tag == 'include_header'):
self.__include_header_files.append(array_tag.text)
elif (array_tag.tag == 'import_serializable_type'):
self.__includes.append(array_tag.text)
elif (array_tag.tag == 'import_enum_type'):
self.__include_enum_files.append(array_tag.text)
elif (array_tag.tag == 'import_array_type'):
self.__include_array_files.append(array_tag.text)
if (not ('typeid' in array.attrib)):
s = etree.tostring(element_tree.getroot())
h = hashlib.sha256(s)
n = h.hexdigest()
self.__type_id = ('0x' + n.upper()[(- 8):])
core = os.environ['BUILD_ROOT']
curdir = os.getcwd()
curdir = curdir.replace(core, '')
self.__include_path = curdir | def __init__(self, xml_file=None):
'\n Given a well formed XML file (xml_file), read it and turn it into\n a big string.\n '
self.__name = ''
self.__namespace = None
self.__include_header_files = []
self.__includes = []
self.__include_enum_files = []
self.__include_array_files = []
self.__comment = None
self.__format = None
self.__type_id = None
self.__string_size = None
self.__type = None
self.__size = None
self.__default = []
self.__xml_filename = xml_file
self.Config = ConfigManager.ConfigManager.getInstance()
typeslist = ['U8', 'I8', 'BYTE', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'F32', 'F64', 'bool', 'ENUM', 'string']
if (not os.path.isfile(xml_file)):
stri = ('ERROR: Could not find specified XML file %s.' % xml_file)
raise IOError(stri)
fd = open(xml_file, 'r')
xml_file = os.path.basename(xml_file)
self.__xml_filename = xml_file
xml_parser = etree.XMLParser(remove_comments=True)
element_tree = etree.parse(fd, parser=xml_parser)
fd.close()
relax_file_handler = open((ROOTDIR + self.Config.get('schema', 'array')), 'r')
relax_parsed = etree.parse(relax_file_handler)
relax_file_handler.close()
relax_compiled = etree.RelaxNG(relax_parsed)
if (not relax_compiled.validate(element_tree)):
raise FprimeRngXmlValidationException(relax_compiled.error_log)
self.validate_xml(xml_file, element_tree, 'schematron', 'array_default')
array = element_tree.getroot()
if (array.tag != 'array'):
PRINT.info(('%s is not an array definition file' % xml_file))
sys.exit((- 1))
print(('Parsing Array %s' % array.attrib['name']))
self.__name = array.attrib['name']
if ('namespace' in array.attrib):
self.__namespace = array.attrib['namespace']
for array_tag in array:
if (array_tag.tag == 'format'):
self.__format = array_tag.text
elif (array_tag.tag == 'type'):
self.__type = array_tag.text
if (not (self.__type in typeslist)):
self.__typeinfo = 'extern'
else:
self.__typeinfo = 'basic'
if ('size' in array_tag.attrib):
self.__string_size = array_tag.attrib['size']
elif (array_tag.tag == 'typeid'):
self.__type_id = array_tag.text
elif (array_tag.tag == 'size'):
self.__size = array_tag.text
elif (array_tag.tag == 'default'):
for value_tag in array_tag:
self.__default.append(value_tag.text)
elif (array_tag.tag == 'comment'):
self.__comment = array_tag.text
elif (array_tag.tag == 'include_header'):
self.__include_header_files.append(array_tag.text)
elif (array_tag.tag == 'import_serializable_type'):
self.__includes.append(array_tag.text)
elif (array_tag.tag == 'import_enum_type'):
self.__include_enum_files.append(array_tag.text)
elif (array_tag.tag == 'import_array_type'):
self.__include_array_files.append(array_tag.text)
if (not ('typeid' in array.attrib)):
s = etree.tostring(element_tree.getroot())
h = hashlib.sha256(s)
n = h.hexdigest()
self.__type_id = ('0x' + n.upper()[(- 8):])
core = os.environ['BUILD_ROOT']
curdir = os.getcwd()
curdir = curdir.replace(core, '')
self.__include_path = curdir<|docstring|>Given a well formed XML file (xml_file), read it and turn it into
a big string.<|endoftext|> |
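The implicit type-id branch above can be replayed in isolation: when no typeid attribute is present, the id is the last 8 hex digits of the SHA-256 of the serialized XML root. A sketch (the sample XML uses only tags the parser handles; the full schema is not reproduced):
import hashlib
from lxml import etree

root = etree.fromstring(
    b'<array name="Coords"><type>U32</type><size>3</size>'
    b'<default><value>0</value><value>0</value><value>0</value></default></array>')
digest = hashlib.sha256(etree.tostring(root)).hexdigest()
print('0x' + digest.upper()[-8:])  # the derived type id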
00b6f01da9794160bac7003efd7f3f4b6a1d40b022fabddca988727c9a87bd02 | def test_wigner_bell1_su2parity():
'wigner: testing the SU2 parity of the first Bell state.\n '
psi = bell_state('00')
steps = 25
theta = np.tile(np.linspace(0, np.pi, steps), 2).reshape(2, steps)
phi = np.tile(np.linspace(0, (2 * np.pi), steps), 2).reshape(2, steps)
slicearray = ['l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = np.real((((((1 + (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 + (np.sqrt(3) * np.cos(theta[(1, t)])))) + (3 * ((((np.sin(theta[(0, t)]) * np.exp(((- 1j) * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp(((- 1j) * phi[(1, p)]))) + (((np.sin(theta[(0, t)]) * np.exp((1j * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp((1j * phi[(1, p)])))))) + ((1 - (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 - (np.sqrt(3) * np.cos(theta[(1, t)]))))) / 8.0))
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11)) | wigner: testing the SU2 parity of the first Bell state. | qutip/tests/test_wigner.py | test_wigner_bell1_su2parity | paniash/qutip | 1 | python | def test_wigner_bell1_su2parity():
'\n '
psi = bell_state('00')
steps = 25
theta = np.tile(np.linspace(0, np.pi, steps), 2).reshape(2, steps)
phi = np.tile(np.linspace(0, (2 * np.pi), steps), 2).reshape(2, steps)
slicearray = ['l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = np.real((((((1 + (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 + (np.sqrt(3) * np.cos(theta[(1, t)])))) + (3 * ((((np.sin(theta[(0, t)]) * np.exp(((- 1j) * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp(((- 1j) * phi[(1, p)]))) + (((np.sin(theta[(0, t)]) * np.exp((1j * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp((1j * phi[(1, p)])))))) + ((1 - (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 - (np.sqrt(3) * np.cos(theta[(1, t)]))))) / 8.0))
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11)) | def test_wigner_bell1_su2parity():
'\n '
psi = bell_state('00')
steps = 25
theta = np.tile(np.linspace(0, np.pi, steps), 2).reshape(2, steps)
phi = np.tile(np.linspace(0, (2 * np.pi), steps), 2).reshape(2, steps)
slicearray = ['l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = np.real((((((1 + (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 + (np.sqrt(3) * np.cos(theta[(1, t)])))) + (3 * ((((np.sin(theta[(0, t)]) * np.exp(((- 1j) * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp(((- 1j) * phi[(1, p)]))) + (((np.sin(theta[(0, t)]) * np.exp((1j * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp((1j * phi[(1, p)])))))) + ((1 - (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 - (np.sqrt(3) * np.cos(theta[(1, t)]))))) / 8.0))
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11))<|docstring|>wigner: testing the SU2 parity of the first Bell state.<|endoftext|> |
39cb2d58227db2fc778bf2e821b5173de5b33a57a5864dcf91f655bf9c695907 | @pytest.mark.slow
def test_wigner_bell4_su2parity():
'wigner: testing the SU2 parity of the fourth Bell state.\n '
psi = bell_state('11')
steps = 25
slicearray = ['l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = (- 0.5)
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11)) | wigner: testing the SU2 parity of the fourth Bell state. | qutip/tests/test_wigner.py | test_wigner_bell4_su2parity | paniash/qutip | 1 | python | @pytest.mark.slow
def test_wigner_bell4_su2parity():
'\n '
psi = bell_state('11')
steps = 25
slicearray = ['l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = (- 0.5)
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11)) | @pytest.mark.slow
def test_wigner_bell4_su2parity():
'\n '
psi = bell_state('11')
steps = 25
slicearray = ['l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = (- 0.5)
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11))<|docstring|>wigner: testing the SU2 parity of the fourth Bell state.<|endoftext|> |
a9100673b9616cef13a375fd67b2a6595f9097d71dd4d37aeadb3c2499b58a14 | @pytest.mark.slow
def test_wigner_bell4_fullparity():
'wigner: testing the parity of the fourth Bell state using the parity of\n the full space.\n '
psi = bell_state('11')
steps = 25
slicearray = ['l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = (- 0.30901699)
print('wigner anal: ', wigner_analyt)
wigner_theo = wigner_transform(psi, 0.5, True, steps, slicearray)
print('wigner theo: ', wigner_theo)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 0.0001)) | wigner: testing the parity of the fourth Bell state using the parity of
the full space. | qutip/tests/test_wigner.py | test_wigner_bell4_fullparity | paniash/qutip | 1 | python | @pytest.mark.slow
def test_wigner_bell4_fullparity():
'wigner: testing the parity of the fourth Bell state using the parity of\n the full space.\n '
psi = bell_state('11')
steps = 25
slicearray = ['l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = (- 0.30901699)
print('wigner anal: ', wigner_analyt)
wigner_theo = wigner_transform(psi, 0.5, True, steps, slicearray)
print('wigner theo: ', wigner_theo)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 0.0001)) | @pytest.mark.slow
def test_wigner_bell4_fullparity():
'wigner: testing the parity of the fourth Bell state using the parity of\n the full space.\n '
psi = bell_state('11')
steps = 25
slicearray = ['l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = (- 0.30901699)
print('wigner anal: ', wigner_analyt)
wigner_theo = wigner_transform(psi, 0.5, True, steps, slicearray)
print('wigner theo: ', wigner_theo)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 0.0001))<|docstring|>wigner: testing the parity of the fourth Bell state using the parity of
the full space.<|endoftext|> |
19e2bcb55367a9bd7eb37ac4ee0cc52b334c9fb87a421a30468abd297476b4c6 | def test_parity():
'wigner: testing the parity function.\n '
j = 0.5
assert_(((_parity(2, j)[(0, 0)] - ((1 - np.sqrt(3)) / 2.0)) < 1e-11))
assert_((_parity(2, j)[(0, 1)] < 1e-11))
assert_(((_parity(2, j)[(1, 1)] - ((1 + np.sqrt(3)) / 2.0)) < 1e-11))
assert_((_parity(2, j)[(1, 0)] < 1e-11)) | wigner: testing the parity function. | qutip/tests/test_wigner.py | test_parity | paniash/qutip | 1 | python | def test_parity():
'\n '
j = 0.5
assert_(((_parity(2, j)[(0, 0)] - ((1 - np.sqrt(3)) / 2.0)) < 1e-11))
assert_((_parity(2, j)[(0, 1)] < 1e-11))
assert_(((_parity(2, j)[(1, 1)] - ((1 + np.sqrt(3)) / 2.0)) < 1e-11))
assert_((_parity(2, j)[(1, 0)] < 1e-11)) | def test_parity():
'\n '
j = 0.5
assert_(((_parity(2, j)[(0, 0)] - ((1 - np.sqrt(3)) / 2.0)) < 1e-11))
assert_((_parity(2, j)[(0, 1)] < 1e-11))
assert_(((_parity(2, j)[(1, 1)] - ((1 + np.sqrt(3)) / 2.0)) < 1e-11))
assert_((_parity(2, j)[(1, 0)] < 1e-11))<|docstring|>wigner: testing the parity function.<|endoftext|> |
9defae4855e02f5e418ffc3764b3e91280855b3cd7742a7586c54b2e614dc5d6 | @pytest.mark.slow
def test_wigner_pure_su2():
'wigner: testing the SU2 wigner transformation of a pure state.\n '
psi = ket([1])
steps = 25
theta = np.linspace(0, np.pi, steps)
phi = np.linspace(0, (2 * np.pi), steps)
theta = theta[(None, :)]
phi = phi[(None, :)]
slicearray = ['l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = ((1 + (np.sqrt(3) * np.cos(theta[(0, t)]))) / 2.0)
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11)) | wigner: testing the SU2 wigner transformation of a pure state. | qutip/tests/test_wigner.py | test_wigner_pure_su2 | paniash/qutip | 1 | python | @pytest.mark.slow
def test_wigner_pure_su2():
'\n '
psi = ket([1])
steps = 25
theta = np.linspace(0, np.pi, steps)
phi = np.linspace(0, (2 * np.pi), steps)
theta = theta[(None, :)]
phi = phi[(None, :)]
slicearray = ['l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = ((1 + (np.sqrt(3) * np.cos(theta[(0, t)]))) / 2.0)
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11)) | @pytest.mark.slow
def test_wigner_pure_su2():
'\n '
psi = ket([1])
steps = 25
theta = np.linspace(0, np.pi, steps)
phi = np.linspace(0, (2 * np.pi), steps)
theta = theta[(None, :)]
phi = phi[(None, :)]
slicearray = ['l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = ((1 + (np.sqrt(3) * np.cos(theta[(0, t)]))) / 2.0)
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11))<|docstring|>wigner: testing the SU2 wigner transformation of a pure state.<|endoftext|> |
1b0badf03735be85cc462315b54941fe05a1afdf17fa0601b3f3d5e38ed3a5c2 | @pytest.mark.slow
def test_wigner_ghz_su2parity():
'wigner: testing the SU2 wigner transformation of the GHZ state.\n '
psi = ((ket([0, 0, 0]) + ket([1, 1, 1])) / np.sqrt(2))
steps = 25
N = 3
theta = np.tile(np.linspace(0, np.pi, steps), N).reshape(N, steps)
phi = np.tile(np.linspace(0, (2 * np.pi), steps), N).reshape(N, steps)
slicearray = ['l', 'l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = np.real(((((((1 + (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 + (np.sqrt(3) * np.cos(theta[(1, t)])))) * (1 + (np.sqrt(3) * np.cos(theta[(2, t)])))) + ((3 ** (3 / 2)) * ((((((np.sin(theta[(0, t)]) * np.exp(((- 1j) * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp(((- 1j) * phi[(1, p)]))) * np.sin(theta[(2, t)])) * np.exp(((- 1j) * phi[(2, p)]))) + (((((np.sin(theta[(0, t)]) * np.exp((1j * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp((1j * phi[(1, p)]))) * np.sin(theta[(2, t)])) * np.exp((1j * phi[(2, p)])))))) + (((1 - (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 - (np.sqrt(3) * np.cos(theta[(1, t)])))) * (1 - (np.sqrt(3) * np.cos(theta[(2, t)]))))) / 16.0))
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11)) | wigner: testing the SU2 wigner transformation of the GHZ state. | qutip/tests/test_wigner.py | test_wigner_ghz_su2parity | paniash/qutip | 1 | python | @pytest.mark.slow
def test_wigner_ghz_su2parity():
'\n '
psi = ((ket([0, 0, 0]) + ket([1, 1, 1])) / np.sqrt(2))
steps = 25
N = 3
theta = np.tile(np.linspace(0, np.pi, steps), N).reshape(N, steps)
phi = np.tile(np.linspace(0, (2 * np.pi), steps), N).reshape(N, steps)
slicearray = ['l', 'l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = np.real(((((((1 + (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 + (np.sqrt(3) * np.cos(theta[(1, t)])))) * (1 + (np.sqrt(3) * np.cos(theta[(2, t)])))) + ((3 ** (3 / 2)) * ((((((np.sin(theta[(0, t)]) * np.exp(((- 1j) * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp(((- 1j) * phi[(1, p)]))) * np.sin(theta[(2, t)])) * np.exp(((- 1j) * phi[(2, p)]))) + (((((np.sin(theta[(0, t)]) * np.exp((1j * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp((1j * phi[(1, p)]))) * np.sin(theta[(2, t)])) * np.exp((1j * phi[(2, p)])))))) + (((1 - (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 - (np.sqrt(3) * np.cos(theta[(1, t)])))) * (1 - (np.sqrt(3) * np.cos(theta[(2, t)]))))) / 16.0))
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11)) | @pytest.mark.slow
def test_wigner_ghz_su2parity():
'\n '
psi = ((ket([0, 0, 0]) + ket([1, 1, 1])) / np.sqrt(2))
steps = 25
N = 3
theta = np.tile(np.linspace(0, np.pi, steps), N).reshape(N, steps)
phi = np.tile(np.linspace(0, (2 * np.pi), steps), N).reshape(N, steps)
slicearray = ['l', 'l', 'l']
wigner_analyt = np.zeros((steps, steps))
for t in range(steps):
for p in range(steps):
wigner_analyt[(t, p)] = np.real(((((((1 + (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 + (np.sqrt(3) * np.cos(theta[(1, t)])))) * (1 + (np.sqrt(3) * np.cos(theta[(2, t)])))) + ((3 ** (3 / 2)) * ((((((np.sin(theta[(0, t)]) * np.exp(((- 1j) * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp(((- 1j) * phi[(1, p)]))) * np.sin(theta[(2, t)])) * np.exp(((- 1j) * phi[(2, p)]))) + (((((np.sin(theta[(0, t)]) * np.exp((1j * phi[(0, p)]))) * np.sin(theta[(1, t)])) * np.exp((1j * phi[(1, p)]))) * np.sin(theta[(2, t)])) * np.exp((1j * phi[(2, p)])))))) + (((1 - (np.sqrt(3) * np.cos(theta[(0, t)]))) * (1 - (np.sqrt(3) * np.cos(theta[(1, t)])))) * (1 - (np.sqrt(3) * np.cos(theta[(2, t)]))))) / 16.0))
wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
assert_((np.sum(np.abs((wigner_analyt - wigner_theo))) < 1e-11))<|docstring|>wigner: testing the SU2 wigner transformation of the GHZ state.<|endoftext|> |
9f69b8d4172efd0e9c5d1ab46159c4376aeaaf6fcaef062a0b7dfdf15b9c05a6 | @pytest.mark.slow
def test_angle_slicing():
'wigner: tests angle slicing.\n '
psi1 = bell_state('00')
psi2 = bell_state('01')
psi3 = bell_state('10')
psi4 = bell_state('11')
steps = 25
j = 0.5
wigner1 = wigner_transform(psi1, j, False, steps, ['l', 'l'])
wigner2 = wigner_transform(psi2, j, False, steps, ['l', 'z'])
wigner3 = wigner_transform(psi3, j, False, steps, ['l', 'x'])
wigner4 = wigner_transform(psi4, j, False, steps, ['l', 'y'])
assert_((np.sum(np.abs((wigner2 - wigner1))) < 1e-11))
assert_((np.sum(np.abs((wigner3 - wigner2))) < 1e-11))
assert_((np.sum(np.abs((wigner4 - wigner3))) < 1e-11))
assert_((np.sum(np.abs((wigner4 - wigner1))) < 1e-11)) | wigner: tests angle slicing. | qutip/tests/test_wigner.py | test_angle_slicing | paniash/qutip | 1 | python | @pytest.mark.slow
def test_angle_slicing():
'\n '
psi1 = bell_state('00')
psi2 = bell_state('01')
psi3 = bell_state('10')
psi4 = bell_state('11')
steps = 25
j = 0.5
wigner1 = wigner_transform(psi1, j, False, steps, ['l', 'l'])
wigner2 = wigner_transform(psi2, j, False, steps, ['l', 'z'])
wigner3 = wigner_transform(psi3, j, False, steps, ['l', 'x'])
wigner4 = wigner_transform(psi4, j, False, steps, ['l', 'y'])
assert_((np.sum(np.abs((wigner2 - wigner1))) < 1e-11))
assert_((np.sum(np.abs((wigner3 - wigner2))) < 1e-11))
assert_((np.sum(np.abs((wigner4 - wigner3))) < 1e-11))
assert_((np.sum(np.abs((wigner4 - wigner1))) < 1e-11)) | @pytest.mark.slow
def test_angle_slicing():
'\n '
psi1 = bell_state('00')
psi2 = bell_state('01')
psi3 = bell_state('10')
psi4 = bell_state('11')
steps = 25
j = 0.5
wigner1 = wigner_transform(psi1, j, False, steps, ['l', 'l'])
wigner2 = wigner_transform(psi2, j, False, steps, ['l', 'z'])
wigner3 = wigner_transform(psi3, j, False, steps, ['l', 'x'])
wigner4 = wigner_transform(psi4, j, False, steps, ['l', 'y'])
assert_((np.sum(np.abs((wigner2 - wigner1))) < 1e-11))
assert_((np.sum(np.abs((wigner3 - wigner2))) < 1e-11))
assert_((np.sum(np.abs((wigner4 - wigner3))) < 1e-11))
assert_((np.sum(np.abs((wigner4 - wigner1))) < 1e-11))<|docstring|>wigner: tests angle slicing.<|endoftext|> |
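For orientation, the call pattern all of these spin-Wigner tests share, grounded in the tests themselves (assumes a qutip version that ships wigner_transform):
from qutip import bell_state, wigner_transform

W = wigner_transform(bell_state('00'), 0.5, False, 25, ['l', 'l'])
print(W.shape)  # (25, 25): one value per (theta, phi) grid point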
1fd2d9909ee070a17dd58a67be3133008dfcbdf86a1e0d09d07dfcad12613107 | def test_wigner_coherent():
'wigner: test wigner function calculation for coherent states'
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
a = (X + (1j * Y))
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 20
beta = (rand() + (rand() * 1j))
psi = coherent(N, beta)
W_qutip = wigner(psi, xvec, yvec, g=2)
W_qutip_cl = wigner(psi, xvec, yvec, g=2, method='clenshaw')
W_analytic = ((2 / np.pi) * np.exp(((- 2) * (abs((a - beta)) ** 2))))
assert_((np.sum((abs((W_qutip - W_analytic)) ** 2)) < 0.0001))
assert_((np.sum((abs((W_qutip_cl - W_analytic)) ** 2)) < 0.0001))
assert_(((((np.sum(W_qutip) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip_cl) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_analytic) * dx) * dy) - 1.0) < 1e-08)) | wigner: test wigner function calculation for coherent states | qutip/tests/test_wigner.py | test_wigner_coherent | paniash/qutip | 1 | python | def test_wigner_coherent():
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
a = (X + (1j * Y))
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 20
beta = (rand() + (rand() * 1j))
psi = coherent(N, beta)
W_qutip = wigner(psi, xvec, yvec, g=2)
W_qutip_cl = wigner(psi, xvec, yvec, g=2, method='clenshaw')
W_analytic = ((2 / np.pi) * np.exp(((- 2) * (abs((a - beta)) ** 2))))
assert_((np.sum((abs((W_qutip - W_analytic)) ** 2)) < 0.0001))
assert_((np.sum((abs((W_qutip_cl - W_analytic)) ** 2)) < 0.0001))
assert_(((((np.sum(W_qutip) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip_cl) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_analytic) * dx) * dy) - 1.0) < 1e-08)) | def test_wigner_coherent():
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
a = (X + (1j * Y))
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 20
beta = (rand() + (rand() * 1j))
psi = coherent(N, beta)
W_qutip = wigner(psi, xvec, yvec, g=2)
W_qutip_cl = wigner(psi, xvec, yvec, g=2, method='clenshaw')
W_analytic = ((2 / np.pi) * np.exp(((- 2) * (abs((a - beta)) ** 2))))
assert_((np.sum((abs((W_qutip - W_analytic)) ** 2)) < 0.0001))
assert_((np.sum((abs((W_qutip_cl - W_analytic)) ** 2)) < 0.0001))
assert_(((((np.sum(W_qutip) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip_cl) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_analytic) * dx) * dy) - 1.0) < 1e-08))<|docstring|>wigner: test wigner function calculation for coherent states<|endoftext|> |
fa528df9243fd37ea48bbcd44d85242ab6e12b730d1b72f52a8bf87c3f7d2b17 | def test_wigner_fock():
'wigner: test wigner function calculation for Fock states'
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
a = (X + (1j * Y))
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 15
for n in [2, 3, 4, 5, 6]:
psi = fock(N, n)
W_qutip = wigner(psi, xvec, yvec, g=2)
W_qutip_cl = wigner(psi, xvec, yvec, g=2, method='clenshaw')
W_qutip_sparse = wigner(psi, xvec, yvec, g=2, sparse=True, method='clenshaw')
W_analytic = ((((2 / np.pi) * ((- 1) ** n)) * np.exp(((- 2) * (abs(a) ** 2)))) * np.polyval(laguerre(n), (4 * (abs(a) ** 2))))
assert_((np.sum(abs((W_qutip - W_analytic))) < 0.0001))
assert_((np.sum(abs((W_qutip_cl - W_analytic))) < 0.0001))
assert_((np.sum(abs((W_qutip_sparse - W_analytic))) < 0.0001))
assert_(((((np.sum(W_qutip) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip_cl) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip_sparse) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_analytic) * dx) * dy) - 1.0) < 1e-08)) | wigner: test wigner function calculation for Fock states | qutip/tests/test_wigner.py | test_wigner_fock | paniash/qutip | 1 | python | def test_wigner_fock():
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
a = (X + (1j * Y))
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 15
for n in [2, 3, 4, 5, 6]:
psi = fock(N, n)
W_qutip = wigner(psi, xvec, yvec, g=2)
W_qutip_cl = wigner(psi, xvec, yvec, g=2, method='clenshaw')
W_qutip_sparse = wigner(psi, xvec, yvec, g=2, sparse=True, method='clenshaw')
W_analytic = ((((2 / np.pi) * ((- 1) ** n)) * np.exp(((- 2) * (abs(a) ** 2)))) * np.polyval(laguerre(n), (4 * (abs(a) ** 2))))
assert_((np.sum(abs((W_qutip - W_analytic))) < 0.0001))
assert_((np.sum(abs((W_qutip_cl - W_analytic))) < 0.0001))
assert_((np.sum(abs((W_qutip_sparse - W_analytic))) < 0.0001))
assert_(((((np.sum(W_qutip) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip_cl) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip_sparse) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_analytic) * dx) * dy) - 1.0) < 1e-08)) | def test_wigner_fock():
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
a = (X + (1j * Y))
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 15
for n in [2, 3, 4, 5, 6]:
psi = fock(N, n)
W_qutip = wigner(psi, xvec, yvec, g=2)
W_qutip_cl = wigner(psi, xvec, yvec, g=2, method='clenshaw')
W_qutip_sparse = wigner(psi, xvec, yvec, g=2, sparse=True, method='clenshaw')
W_analytic = ((((2 / np.pi) * ((- 1) ** n)) * np.exp(((- 2) * (abs(a) ** 2)))) * np.polyval(laguerre(n), (4 * (abs(a) ** 2))))
assert_((np.sum(abs((W_qutip - W_analytic))) < 0.0001))
assert_((np.sum(abs((W_qutip_cl - W_analytic))) < 0.0001))
assert_((np.sum(abs((W_qutip_sparse - W_analytic))) < 0.0001))
assert_(((((np.sum(W_qutip) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip_cl) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip_sparse) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_analytic) * dx) * dy) - 1.0) < 1e-08))<|docstring|>wigner: test wigner function calculation for Fock states<|endoftext|> |
b184a9513d1795bcc584c73bdf4786536142aaafac0e57fbd7082eb4b2812a61 | def test_wigner_compare_methods_dm():
'wigner: compare wigner methods for random density matrices'
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 15
for n in range(10):
rho = rand_dm(N, (0.5 + (rand() / 2)))
W_qutip1 = wigner(rho, xvec, yvec, g=2)
W_qutip2 = wigner(rho, xvec, yvec, g=2, method='laguerre')
assert_((np.sum(abs((W_qutip1 - W_qutip2))) < 0.0001))
assert_(((((np.sum(W_qutip1) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip2) * dx) * dy) - 1.0) < 1e-08)) | wigner: compare wigner methods for random density matrices | qutip/tests/test_wigner.py | test_wigner_compare_methods_dm | paniash/qutip | 1 | python | def test_wigner_compare_methods_dm():
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 15
for n in range(10):
rho = rand_dm(N, (0.5 + (rand() / 2)))
W_qutip1 = wigner(rho, xvec, yvec, g=2)
W_qutip2 = wigner(rho, xvec, yvec, g=2, method='laguerre')
assert_((np.sum(abs((W_qutip1 - W_qutip2))) < 0.0001))
assert_(((((np.sum(W_qutip1) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip2) * dx) * dy) - 1.0) < 1e-08)) | def test_wigner_compare_methods_dm():
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 15
for n in range(10):
rho = rand_dm(N, (0.5 + (rand() / 2)))
W_qutip1 = wigner(rho, xvec, yvec, g=2)
W_qutip2 = wigner(rho, xvec, yvec, g=2, method='laguerre')
assert_((np.sum(abs((W_qutip1 - W_qutip2))) < 0.0001))
assert_(((((np.sum(W_qutip1) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip2) * dx) * dy) - 1.0) < 1e-08))<|docstring|>wigner: compare wigner methods for random density matrices<|endoftext|> |
29de5f54a3aa61e56c7aded20b07674176fc8687906778f865786e926146d1e9 | def test_wigner_compare_methods_ket():
'wigner: compare wigner methods for random state vectors'
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 15
for n in range(10):
psi = rand_ket(N, (0.5 + (rand() / 2)))
W_qutip1 = wigner(psi, xvec, yvec, g=2)
W_qutip2 = wigner(psi, xvec, yvec, g=2, sparse=True)
assert_((np.sum(abs((W_qutip1 - W_qutip2))) < 0.0001))
assert_(((((np.sum(W_qutip1) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip2) * dx) * dy) - 1.0) < 1e-08)) | wigner: compare wigner methods for random state vectors | qutip/tests/test_wigner.py | test_wigner_compare_methods_ket | paniash/qutip | 1 | python | def test_wigner_compare_methods_ket():
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 15
for n in range(10):
psi = rand_ket(N, (0.5 + (rand() / 2)))
W_qutip1 = wigner(psi, xvec, yvec, g=2)
W_qutip2 = wigner(psi, xvec, yvec, g=2, sparse=True)
assert_((np.sum(abs((W_qutip1 - W_qutip2))) < 0.0001))
assert_(((((np.sum(W_qutip1) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip2) * dx) * dy) - 1.0) < 1e-08)) | def test_wigner_compare_methods_ket():
xvec = np.linspace((- 5.0), 5.0, 100)
yvec = xvec
(X, Y) = np.meshgrid(xvec, yvec)
dx = (xvec[1] - xvec[0])
dy = (yvec[1] - yvec[0])
N = 15
for n in range(10):
psi = rand_ket(N, (0.5 + (rand() / 2)))
W_qutip1 = wigner(psi, xvec, yvec, g=2)
W_qutip2 = wigner(psi, xvec, yvec, g=2, sparse=True)
assert_((np.sum(abs((W_qutip1 - W_qutip2))) < 0.0001))
assert_(((((np.sum(W_qutip1) * dx) * dy) - 1.0) < 1e-08))
assert_(((((np.sum(W_qutip2) * dx) * dy) - 1.0) < 1e-08))<|docstring|>wigner: compare wigner methods for random state vectors<|endoftext|> |
b64733efe2b1ebfe714e24f0d5fda95788d87082755802cbd2fabff4d2aada5b | def test_wigner_fft_comparse_ket():
'Wigner: Compare Wigner fft and iterative for rand. ket'
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_ket(N)
(Wfft, yvec) = wigner(rho, xvec, xvec, method='fft')
W = wigner(rho, xvec, yvec, method='iterative')
Wdiff = abs((W - Wfft))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True) | Wigner: Compare Wigner fft and iterative for rand. ket | qutip/tests/test_wigner.py | test_wigner_fft_comparse_ket | paniash/qutip | 1 | python | def test_wigner_fft_comparse_ket():
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_ket(N)
(Wfft, yvec) = wigner(rho, xvec, xvec, method='fft')
W = wigner(rho, xvec, yvec, method='iterative')
Wdiff = abs((W - Wfft))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True) | def test_wigner_fft_comparse_ket():
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_ket(N)
(Wfft, yvec) = wigner(rho, xvec, xvec, method='fft')
W = wigner(rho, xvec, yvec, method='iterative')
Wdiff = abs((W - Wfft))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True)<|docstring|>Wigner: Compare Wigner fft and iterative for rand. ket<|endoftext|> |
b0e6d1725251197356376368b89fd2974f4dea0b8ca4f8ace7a723cf015cae1b | def test_wigner_fft_comparse_dm():
'Wigner: Compare Wigner fft and iterative for rand. dm'
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_dm(N)
(Wfft, yvec) = wigner(rho, xvec, xvec, method='fft')
W = wigner(rho, xvec, yvec, method='iterative')
Wdiff = abs((W - Wfft))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True) | Wigner: Compare Wigner fft and iterative for rand. dm | qutip/tests/test_wigner.py | test_wigner_fft_comparse_dm | paniash/qutip | 1 | python | def test_wigner_fft_comparse_dm():
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_dm(N)
(Wfft, yvec) = wigner(rho, xvec, xvec, method='fft')
W = wigner(rho, xvec, yvec, method='iterative')
Wdiff = abs((W - Wfft))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True) | def test_wigner_fft_comparse_dm():
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_dm(N)
(Wfft, yvec) = wigner(rho, xvec, xvec, method='fft')
W = wigner(rho, xvec, yvec, method='iterative')
Wdiff = abs((W - Wfft))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True)<|docstring|>Wigner: Compare Wigner fft and iterative for rand. dm<|endoftext|> |
39dac2d741704324c91a9de016b327bb4f9ae7bc304dc87e76f559039400b0ac | def test_wigner_clenshaw_iter_dm():
'Wigner: Compare Wigner clenshaw and iterative for rand. dm'
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_dm(N)
Wclen = wigner(rho, xvec, xvec, method='clenshaw')
W = wigner(rho, xvec, xvec, method='iterative')
Wdiff = abs((W - Wclen))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True) | Wigner: Compare Wigner clenshaw and iterative for rand. dm | qutip/tests/test_wigner.py | test_wigner_clenshaw_iter_dm | paniash/qutip | 1 | python | def test_wigner_clenshaw_iter_dm():
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_dm(N)
Wclen = wigner(rho, xvec, xvec, method='clenshaw')
W = wigner(rho, xvec, xvec, method='iterative')
Wdiff = abs((W - Wclen))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True) | def test_wigner_clenshaw_iter_dm():
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_dm(N)
Wclen = wigner(rho, xvec, xvec, method='clenshaw')
W = wigner(rho, xvec, xvec, method='iterative')
Wdiff = abs((W - Wclen))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True)<|docstring|>Wigner: Compare Wigner clenshaw and iterative for rand. dm<|endoftext|> |
d21900ea85edc634c4b428003c9ac69be886a0927a47764d1f429f232162ab3a | def test_wigner_clenshaw_sp_iter_dm():
'Wigner: Compare Wigner sparse clenshaw and iterative for rand. dm'
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_dm(N)
Wclen = wigner(rho, xvec, xvec, method='clenshaw', sparse=True)
W = wigner(rho, xvec, xvec, method='iterative')
Wdiff = abs((W - Wclen))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True) | Wigner: Compare Wigner sparse clenshaw and iterative for rand. dm | qutip/tests/test_wigner.py | test_wigner_clenshaw_sp_iter_dm | paniash/qutip | 1 | python | def test_wigner_clenshaw_sp_iter_dm():
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_dm(N)
Wclen = wigner(rho, xvec, xvec, method='clenshaw', sparse=True)
W = wigner(rho, xvec, xvec, method='iterative')
Wdiff = abs((W - Wclen))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True) | def test_wigner_clenshaw_sp_iter_dm():
N = 20
xvec = np.linspace((- 10), 10, 128)
for i in range(3):
rho = rand_dm(N)
Wclen = wigner(rho, xvec, xvec, method='clenshaw', sparse=True)
W = wigner(rho, xvec, xvec, method='iterative')
Wdiff = abs((W - Wclen))
assert_equal((np.sum(abs(Wdiff)) < 1e-07), True)<|docstring|>Wigner: Compare Wigner sparse clenshaw and iterative for rand. dm<|endoftext|> |
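A compact summary of the flat-phase-space API exercised by the tests above (assumes qutip is installed; the normalization check mirrors the assertions):
import numpy as np
from qutip import coherent, wigner

xvec = np.linspace(-5, 5, 100)
W = wigner(coherent(20, 1 + 1j), xvec, xvec, g=2)
dx = xvec[1] - xvec[0]
print(W.shape, W.sum() * dx * dx)  # (100, 100) and approximately 1.0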
e8ce82f1dd46ed1b7dc17667a3974cd89efb6d7ab50bea51625b7bcf701bac30 | def _get_datetime(element):
'Converts an element to a Python datetime.\n\n    Args:\n        element: A dataframe element; must be a Timestamp.\n\n    Returns:\n        The corresponding datetime.\n    '
assert isinstance(element, pd.Timestamp)
return element.to_pydatetime() | Converts an element to a Python datetime.
Args:
element: A dataframe element; must be a Timestamp.
Returns:
The corresponding datetime. | purplequery/bq_types.py | _get_datetime | mark-velez/purplequery | 13 | python | def _get_datetime(element):
'Converts an element to a Python datetime.\n\n    Args:\n        element: A dataframe element; must be a Timestamp.\n\n    Returns:\n        The corresponding datetime.\n    '
assert isinstance(element, pd.Timestamp)
return element.to_pydatetime() | def _get_datetime(element):
'Converts an element to a Python datetime.\n\n    Args:\n        element: A dataframe element; must be a Timestamp.\n\n    Returns:\n        The corresponding datetime.\n    '
assert isinstance(element, pd.Timestamp)
return element.to_pydatetime()<|docstring|>Converts an element to a Python datetime.
Args:
element: A dataframe element; must be a Timestamp.
Returns:
The corresponding datetime.<|endoftext|>
bde1e8e55e6ae598b0faac5d10d76fcfb6f619941047eb89cac210478bc60aa2 | def _get_date(timestamp):
'Converts an element to a Python date.\n\n    Args:\n        timestamp: A dataframe element; must be a Timestamp.\n\n    Returns:\n        The corresponding date.\n    '
assert isinstance(timestamp, pd.Timestamp)
return timestamp.to_pydatetime().date() | Converts an element to a Python date.
Args:
timestamp: A dataframe element; must be a Timestamp.
Returns:
The corresponding date. | purplequery/bq_types.py | _get_date | mark-velez/purplequery | 13 | python | def _get_date(timestamp):
'Converts an element to a Python date.\n\n    Args:\n        timestamp: A dataframe element; must be a Timestamp.\n\n    Returns:\n        The corresponding date.\n    '
assert isinstance(timestamp, pd.Timestamp)
return timestamp.to_pydatetime().date() | def _get_date(timestamp):
'Converts an element to a Python date.\n\n    Args:\n        timestamp: A dataframe element; must be a Timestamp.\n\n    Returns:\n        The corresponding date.\n    '
assert isinstance(timestamp, pd.Timestamp)
return timestamp.to_pydatetime().date()<|docstring|>Converts an element to a Python date.
Args:
timestamp: A dataframe element; must be a Timestamp.
Returns:
The corresponding date.<|endoftext|> |
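A round-trip sketch for the two converters above, assuming they are imported from purplequery.bq_types:
import pandas as pd

ts = pd.Timestamp('2020-03-01 12:30')
print(_get_datetime(ts))  # 2020-03-01 12:30:00 (a datetime.datetime)
print(_get_date(ts))      # 2020-03-01 (a datetime.date)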
cb5fdc774eef8dece8b724e6ce1712ed04548b006964f713beecfcdd41e922df | def _get_str(s):
"Converts an element to a Python string.\n\n Python 2 and Python 3 have different sets of string-related types, and different rules for\n conversion between those types. The short version is, Python 2 has str and unicode, and strs\n are valid unicode values; Python 3 has str and bytes, and there is no implicit conversion\n between them. An element that has the BQ type STRING might be any of these types, but it needs\n to end up being one of six.string_types, i.e. not bytes. So: if it is a string type coming in,\n we leave it that way, if it's bytes, we do an explicit unicode conversion, and otherwise, it's\n an error.\n\n Args:\n element: A dataframe element; must be a string or bytes\n\n Returns:\n The corresponding string\n "
if isinstance(s, six.string_types):
return s
if isinstance(s, bytes):
return s.decode('utf-8')
raise ValueError('Invalid string {}'.format(s)) | Converts an element to a Python string.
Python 2 and Python 3 have different sets of string-related types, and different rules for
conversion between those types. The short version is, Python 2 has str and unicode, and strs
are valid unicode values; Python 3 has str and bytes, and there is no implicit conversion
between them. An element that has the BQ type STRING might be any of these types, but it needs
to end up being one of six.string_types, i.e. not bytes. So: if it is a string type coming in,
we leave it that way, if it's bytes, we do an explicit unicode conversion, and otherwise, it's
an error.
Args:
element: A dataframe element; must be a string or bytes
Returns:
The corresponding string | purplequery/bq_types.py | _get_str | mark-velez/purplequery | 13 | python | def _get_str(s):
"Converts an element to a Python string.\n\n Python 2 and Python 3 have different sets of string-related types, and different rules for\n conversion between those types. The short version is, Python 2 has str and unicode, and strs\n are valid unicode values; Python 3 has str and bytes, and there is no implicit conversion\n between them. An element that has the BQ type STRING might be any of these types, but it needs\n to end up being one of six.string_types, i.e. not bytes. So: if it is a string type coming in,\n we leave it that way, if it's bytes, we do an explicit unicode conversion, and otherwise, it's\n an error.\n\n Args:\n element: A dataframe element; must be a string or bytes\n\n Returns:\n The corresponding string\n "
if isinstance(s, six.string_types):
return s
if isinstance(s, bytes):
return s.decode('utf-8')
raise ValueError('Invalid string {}'.format(s)) | def _get_str(s):
"Converts an element to a Python string.\n\n Python 2 and Python 3 have different sets of string-related types, and different rules for\n conversion between those types. The short version is, Python 2 has str and unicode, and strs\n are valid unicode values; Python 3 has str and bytes, and there is no implicit conversion\n between them. An element that has the BQ type STRING might be any of these types, but it needs\n to end up being one of six.string_types, i.e. not bytes. So: if it is a string type coming in,\n we leave it that way, if it's bytes, we do an explicit unicode conversion, and otherwise, it's\n an error.\n\n Args:\n element: A dataframe element; must be a string or bytes\n\n Returns:\n The corresponding string\n "
if isinstance(s, six.string_types):
return s
if isinstance(s, bytes):
return s.decode('utf-8')
raise ValueError('Invalid string {}'.format(s))<|docstring|>Converts an element to a Python string.
Python 2 and Python 3 have different sets of string-related types, and different rules for
conversion between those types. The short version is, Python 2 has str and unicode, and strs
are valid unicode values; Python 3 has str and bytes, and there is no implicit conversion
between them. An element that has the BQ type STRING might be any of these types, but it needs
to end up being one of six.string_types, i.e. not bytes. So: if it is a string type coming in,
we leave it that way, if it's bytes, we do an explicit unicode conversion, and otherwise, it's
an error.
Args:
element: A dataframe element; must be a string or bytes
Returns:
The corresponding string<|endoftext|> |
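A behaviour sketch for _get_str, assuming it is imported; the byte string is an arbitrary UTF-8 example:
print(_get_str(u'caf\xe9'))      # returned as-is
print(_get_str(b'caf\xc3\xa9'))  # decoded from UTF-8: 'café'
# _get_str(3.14) raises ValueError('Invalid string 3.14')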
29f09e52b290db67a291aec68b2ec7900728d162e52e53c5ee0411fb01cdcfa4 | def _coerce_names(names):
"Coerce a set of field names. Names agree if equal or if one is None.\n\n This function is called in the context of coercing STRUCT types like\n STRUCT<a INTEGER, b> with STRUCT(1 as a, 7). In that case, it would be called twice, one for\n the first column (to merge the two 'a's into 'a') and once for the second column (to merge 'b'\n with the unnamed column in the second type).\n\n Args:\n names: A sequence of names (strings). These are all names for the *same* field for\n different STRUCT types that are being coerced to a common type.\n Raises:\n ValueError if the names cannot be coerced (if two of the strings are both non-None and\n different).\n Returns:\n The single name matching all the names, or None if no non-empty names were provided.\n "
nonempty_names = {name for name in names if (name is not None)}
if (not nonempty_names):
return None
if (len(nonempty_names) > 1):
raise ValueError('Cannot merge Structs; field names {} do not match'.format(nonempty_names))
return nonempty_names.pop() | Coerce a set of field names. Names agree if equal or if one is None.
This function is called in the context of coercing STRUCT types like
STRUCT<a INTEGER, b> with STRUCT(1 as a, 7). In that case, it would be called twice, once for
the first column (to merge the two 'a's into 'a') and once for the second column (to merge 'b'
with the unnamed column in the second type).
Args:
names: A sequence of names (strings). These are all names for the *same* field for
different STRUCT types that are being coerced to a common type.
Raises:
ValueError if the names cannot be coerced (if two of the strings are both non-None and
different).
Returns:
The single name matching all the names, or None if no non-empty names were provided. | purplequery/bq_types.py | _coerce_names | mark-velez/purplequery | 13 | python | def _coerce_names(names):
"Coerce a set of field names. Names agree if equal or if one is None.\n\n This function is called in the context of coercing STRUCT types like\n STRUCT<a INTEGER, b> with STRUCT(1 as a, 7). In that case, it would be called twice, one for\n the first column (to merge the two 'a's into 'a') and once for the second column (to merge 'b'\n with the unnamed column in the second type).\n\n Args:\n names: A sequence of names (strings). These are all names for the *same* field for\n different STRUCT types that are being coerced to a common type.\n Raises:\n ValueError if the names cannot be coerced (if two of the strings are both non-None and\n different).\n Returns:\n The single name matching all the names, or None if no non-empty names were provided.\n "
nonempty_names = {name for name in names if (name is not None)}
if (not nonempty_names):
return None
if (len(nonempty_names) > 1):
raise ValueError('Cannot merge Structs; field names {} do not match'.format(nonempty_names))
return nonempty_names.pop() | def _coerce_names(names):
"Coerce a set of field names. Names agree if equal or if one is None.\n\n This function is called in the context of coercing STRUCT types like\n STRUCT<a INTEGER, b> with STRUCT(1 as a, 7). In that case, it would be called twice, one for\n the first column (to merge the two 'a's into 'a') and once for the second column (to merge 'b'\n with the unnamed column in the second type).\n\n Args:\n names: A sequence of names (strings). These are all names for the *same* field for\n different STRUCT types that are being coerced to a common type.\n Raises:\n ValueError if the names cannot be coerced (if two of the strings are both non-None and\n different).\n Returns:\n The single name matching all the names, or None if no non-empty names were provided.\n "
nonempty_names = {name for name in names if (name is not None)}
if (not nonempty_names):
return None
if (len(nonempty_names) > 1):
raise ValueError('Cannot merge Structs; field names {} do not match'.format(nonempty_names))
return nonempty_names.pop()<|docstring|>Coerce a set of field names. Names agree if equal or if one is None.
This function is called in the context of coercing STRUCT types like
STRUCT<a INTEGER, b> with STRUCT(1 as a, 7). In that case, it would be called twice, once for
the first column (to merge the two 'a's into 'a') and once for the second column (to merge 'b'
with the unnamed column in the second type).
Args:
names: A sequence of names (strings). These are all names for the *same* field for
different STRUCT types that are being coerced to a common type.
Raises:
ValueError if the names cannot be coerced (if two of the strings are both non-None and
different).
Returns:
The single name matching all the names, or None if no non-empty names were provided.<|endoftext|> |
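Replaying the docstring's STRUCT<a INTEGER, b> / STRUCT(1 AS a, 7) scenario directly (calls assume _coerce_names is imported; outputs follow from the code above):
print(_coerce_names(['a', 'a']))    # 'a'   -- the names agree
print(_coerce_names(['b', None]))   # 'b'   -- None defers to the named field
print(_coerce_names([None, None]))  # None  -- no non-empty names at all
# _coerce_names(['a', 'b']) raises ValueError: field names do not match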
9e179b6aa5af58b7c93d2a4608e0d2cf67a4b8999ef6eb9a579d9c903a0bd543 | def _coerce_structs(struct_types):
"Coerce a sequence of struct types into one.\n\n Struct types are merged field-by-field. If the number of fields is different, they don't match.\n Two fields are merged by merging the names (see _coerce_names) and the types (recursively).\n\n Args:\n struct_types: a sequence of struct types.\n Raises:\n ValueError: if the types cannot be coerced.\n Returns:\n A single type that matches all the provided types.\n "
if (not struct_types):
return None
num_fieldses = [len(type_.fields) for type_ in struct_types]
if (not all(((num_fields == num_fieldses[0]) for num_fields in num_fieldses[1:]))):
raise ValueError('Cannot merge types {}; number of fields varies!'.format(struct_types))
num_fields = num_fieldses[0]
field_types = [implicitly_coerce(*[type_.types[i] for type_ in struct_types]) for i in range(num_fields)]
field_names = [_coerce_names([type_.fields[i] for type_ in struct_types]) for i in range(num_fields)]
return BQStructType(field_names, field_types) | Coerce a sequence of struct types into one.
Struct types are merged field-by-field. If the number of fields is different, they don't match.
Two fields are merged by merging the names (see _coerce_names) and the types (recursively).
Args:
struct_types: a sequence of struct types.
Raises:
ValueError: if the types cannot be coerced.
Returns:
A single type that matches all the provided types. | purplequery/bq_types.py | _coerce_structs | mark-velez/purplequery | 13 | python | def _coerce_structs(struct_types):
"Coerce a sequence of struct types into one.\n\n Struct types are merged field-by-field. If the number of fields is different, they don't match.\n Two fields are merged by merging the names (see _coerce_names) and the types (recursively).\n\n Args:\n struct_types: a sequence of struct types.\n Raises:\n ValueError: if the types cannot be coerced.\n Returns:\n A single type that matches all the provided types.\n "
if (not struct_types):
return None
num_fieldses = [len(type_.fields) for type_ in struct_types]
if (not all(((num_fields == num_fieldses[0]) for num_fields in num_fieldses[1:]))):
raise ValueError('Cannot merge types {}; number of fields varies!'.format(struct_types))
num_fields = num_fieldses[0]
field_types = [implicitly_coerce(*[type_.types[i] for type_ in struct_types]) for i in range(num_fields)]
field_names = [_coerce_names([type_.fields[i] for type_ in struct_types]) for i in range(num_fields)]
return BQStructType(field_names, field_types) | def _coerce_structs(struct_types):
"Coerce a sequence of struct types into one.\n\n Struct types are merged field-by-field. If the number of fields is different, they don't match.\n Two fields are merged by merging the names (see _coerce_names) and the types (recursively).\n\n Args:\n struct_types: a sequence of struct types.\n Raises:\n ValueError: if the types cannot be coerced.\n Returns:\n A single type that matches all the provided types.\n "
if (not struct_types):
return None
num_fieldses = [len(type_.fields) for type_ in struct_types]
if (not all(((num_fields == num_fieldses[0]) for num_fields in num_fieldses[1:]))):
raise ValueError('Cannot merge types {}; number of fields varies!'.format(struct_types))
num_fields = num_fieldses[0]
field_types = [implicitly_coerce(*[type_.types[i] for type_ in struct_types]) for i in range(num_fields)]
field_names = [_coerce_names([type_.fields[i] for type_ in struct_types]) for i in range(num_fields)]
return BQStructType(field_names, field_types)<|docstring|>Coerce a sequence of struct types into one.
Struct types are merged field-by-field. If the number of fields is different, they don't match.
Two fields are merged by merging the names (see _coerce_names) and the types (recursively).
Args:
struct_types: a sequence of struct types.
Raises:
ValueError: if the types cannot be coerced.
Returns:
A single type that matches all the provided types.<|endoftext|> |
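To see the field-by-field merging in action, here is a hedged sketch that goes through implicitly_coerce (which dispatches struct inputs to _coerce_structs); it assumes purplequery.bq_types exposes BQStructType and BQScalarType as the later rows show:

from purplequery.bq_types import BQScalarType, BQStructType, implicitly_coerce

merged = implicitly_coerce(
    BQStructType(['a', 'b'], [BQScalarType.INTEGER, None]),
    BQStructType(['a', None], [BQScalarType.FLOAT, BQScalarType.STRING]))
# Field 0: names 'a'/'a' agree, and INTEGER/FLOAT widen to FLOAT.
# Field 1: 'b' merges with the unnamed field, and the lone STRING type wins.
assert merged == BQStructType(['a', 'b'], [BQScalarType.FLOAT, BQScalarType.STRING])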
c47df2f19e277f6c18329bc6ed31ef3c43c752bd3704f28a3abf1da007b51227 | def implicitly_coerce(*types):
'Given some number of types, return their common supertype, if any.\n All given types must be implicitly coercible to a common supertype.\n Specifically, INT64 and NUMERIC coerce to FLOAT, and STRING coerces to DATE or TIMESTAMP.\n All other conversions must be specified explicitly.\n\n See: https://cloud.google.com/bigquery/docs/reference/standard-sql/conversion_rules\n And: https://cloud.google.com/bigquery/docs/reference/standard-sql/conditional_expressions\n\n Note that there is no BQScalarType for NUMERIC - it is not supported in Fake BigQuery.\n\n Args:\n types: Types to combine\n Returns:\n A supertype to which all of the given types can be coerced\n '
types = tuple((type_ for type_ in types if (type_ is not None)))
if (len(types) == 0):
raise ValueError('No types provided to merge')
if (len(types) == 1):
return types[0]
if all(((type_ == types[0]) for type_ in types[1:])):
return types[0]
if all(((type_ in [BQScalarType.INTEGER, BQScalarType.FLOAT]) for type_ in types)):
return BQScalarType.FLOAT
if all(((type_ in [BQScalarType.STRING, BQScalarType.DATE]) for type_ in types)):
return BQScalarType.DATE
if all(((type_ in [BQScalarType.STRING, BQScalarType.TIMESTAMP]) for type_ in types)):
return BQScalarType.TIMESTAMP
if all((isinstance(type_, BQStructType) for type_ in types)):
return _coerce_structs(cast(Tuple[(BQStructType, ...)], types))
raise ValueError('Cannot implicitly coerce the given types: {}'.format(types)) | Given some number of types, return their common supertype, if any.
All given types must be implicitly coercible to a common supertype.
Specifically, INT64 and NUMERIC coerce to FLOAT, and STRING coerces to DATE or TIMESTAMP.
All other conversions must be specified explicitly.
See: https://cloud.google.com/bigquery/docs/reference/standard-sql/conversion_rules
And: https://cloud.google.com/bigquery/docs/reference/standard-sql/conditional_expressions
Note that there is no BQScalarType for NUMERIC - it is not supported in Fake BigQuery.
Args:
types: Types to combine
Returns:
A supertype to which all of the given types can be coerced | purplequery/bq_types.py | implicitly_coerce | mark-velez/purplequery | 13 | python | def implicitly_coerce(*types):
'Given some number of types, return their common supertype, if any.\n All given types must be implicitly coercible to a common supertype.\n Specifically, INT64 and NUMERIC coerce to FLOAT, and STRING coerces to DATE or TIMESTAMP.\n All other conversions must be specified explicitly.\n\n See: https://cloud.google.com/bigquery/docs/reference/standard-sql/conversion_rules\n And: https://cloud.google.com/bigquery/docs/reference/standard-sql/conditional_expressions\n\n Note that there is no BQScalarType for NUMERIC - it is not supported in Fake BigQuery.\n\n Args:\n types: Types to combine\n Returns:\n A supertype to which all of the given types can be coerced\n '
types = tuple((type_ for type_ in types if (type_ is not None)))
if (len(types) == 0):
raise ValueError('No types provided to merge')
if (len(types) == 1):
return types[0]
if all(((type_ == types[0]) for type_ in types[1:])):
return types[0]
if all(((type_ in [BQScalarType.INTEGER, BQScalarType.FLOAT]) for type_ in types)):
return BQScalarType.FLOAT
if all(((type_ in [BQScalarType.STRING, BQScalarType.DATE]) for type_ in types)):
return BQScalarType.DATE
if all(((type_ in [BQScalarType.STRING, BQScalarType.TIMESTAMP]) for type_ in types)):
return BQScalarType.TIMESTAMP
if all((isinstance(type_, BQStructType) for type_ in types)):
return _coerce_structs(cast(Tuple[(BQStructType, ...)], types))
raise ValueError('Cannot implicitly coerce the given types: {}'.format(types)) | def implicitly_coerce(*types):
'Given some number of types, return their common supertype, if any.\n All given types must be implicitly coercible to a common supertype.\n Specifically, INT64 and NUMERIC coerce to FLOAT, and STRING coerces to DATE or TIMESTAMP.\n All other conversions must be specified explicitly.\n\n See: https://cloud.google.com/bigquery/docs/reference/standard-sql/conversion_rules\n And: https://cloud.google.com/bigquery/docs/reference/standard-sql/conditional_expressions\n\n Note that there is no BQScalarType for NUMERIC - it is not supported in Fake BigQuery.\n\n Args:\n types: Types to combine\n Returns:\n A supertype to which all of the given types can be coerced\n '
types = tuple((type_ for type_ in types if (type_ is not None)))
if (len(types) == 0):
raise ValueError('No types provided to merge')
if (len(types) == 1):
return types[0]
if all(((type_ == types[0]) for type_ in types[1:])):
return types[0]
if all(((type_ in [BQScalarType.INTEGER, BQScalarType.FLOAT]) for type_ in types)):
return BQScalarType.FLOAT
if all(((type_ in [BQScalarType.STRING, BQScalarType.DATE]) for type_ in types)):
return BQScalarType.DATE
if all(((type_ in [BQScalarType.STRING, BQScalarType.TIMESTAMP]) for type_ in types)):
return BQScalarType.TIMESTAMP
if all((isinstance(type_, BQStructType) for type_ in types)):
return _coerce_structs(cast(Tuple[(BQStructType, ...)], types))
raise ValueError('Cannot implicitly coerce the given types: {}'.format(types))<|docstring|>Given some number of types, return their common supertype, if any.
All given types must be implicitly coercible to a common supertype.
Specifically, INT64 and NUMERIC coerce to FLOAT, and STRING coerces to DATE or TIMESTAMP.
All other conversions must be specified explicitly.
See: https://cloud.google.com/bigquery/docs/reference/standard-sql/conversion_rules
And: https://cloud.google.com/bigquery/docs/reference/standard-sql/conditional_expressions
Note that there is no BQScalarType for NUMERIC - it is not supported in Fake BigQuery.
Args:
types: Types to combine
Returns:
A supertype to which all of the given types can be coerced<|endoftext|> |
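A minimal sketch of these coercion rules, assuming the package installs as purplequery and the module imports as purplequery.bq_types:

from purplequery.bq_types import BQScalarType, implicitly_coerce

assert implicitly_coerce(BQScalarType.INTEGER, BQScalarType.FLOAT) == BQScalarType.FLOAT
assert implicitly_coerce(BQScalarType.STRING, BQScalarType.DATE) == BQScalarType.DATE
assert implicitly_coerce(BQScalarType.STRING, BQScalarType.TIMESTAMP) == BQScalarType.TIMESTAMP
try:
    implicitly_coerce(BQScalarType.INTEGER, BQScalarType.DATE)  # no rule covers this pair
except ValueError as e:
    print(e)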
8732fbd71ea3a7a9b64fefee82647d89cd739d9acd70edd7ebdf7e6acc0020c5 | def to_dtype(self):
"Converts this BigQuery type to a NumPy dtype.\n\n Returns:\n 'object' dtype, meaning NumPy will not try to interpret the type and will leave it\n as a Python object.\n\n Subclasses may override this function to return a more specific NumPy dtype.\n "
return np.dtype('object') | Converts this BigQuery type to a NumPy dtype.
Returns:
'object' dtype, meaning NumPy will not try to interpret the type and will leave it
as a Python object.
Subclasses may override this function to return a more specific NumPy dtype. | purplequery/bq_types.py | to_dtype | mark-velez/purplequery | 13 | python | def to_dtype(self):
"Converts this BigQuery type to a NumPy dtype.\n\n Returns:\n 'object' dtype, meaning NumPy will not try to interpret the type and will leave it\n as a Python object.\n\n Subclasses may override this function to return a more specific NumPy dtype.\n "
return np.dtype('object') | def to_dtype(self):
"Converts this BigQuery type to a NumPy dtype.\n\n Returns:\n 'object' dtype, meaning NumPy will not try to interpret the type and will leave it\n as a Python object.\n\n Subclasses may override this function to return a more specific NumPy dtype.\n "
return np.dtype('object')<|docstring|>Converts this BigQuery type to a NumPy dtype.
Returns:
'object' dtype, meaning NumPy will not try to interpret the type and will leave it
as a Python object.
Subclasses may override this function to return a more specific NumPy dtype.<|endoftext|> |
869d76368f732d472e7240661416a2a1ee4c46ee66cae258998c5939b762ee21 | @classmethod
def from_schema_field(cls, field):
'Converts from a BigQuery SchemaField object to a BQType subclass.\n\n This is a factory function that constructs an object of the appropriate child class.\n\n Args:\n field: A BigQuery SchemaField object, the google cloud bigquery Python API\n representation of a column type.\n\n Returns:\n An instance of a BQType subclass that corresponds to the input type.\n '
if (field.mode in ('ARRAY', 'REPEATED')):
return BQArray(BQScalarType.from_string(field.field_type))
return BQScalarType.from_string(field.field_type) | Converts from a BigQuery SchemaField object to a BQType subclass.
This is a factory function that constructs an object of the appropriate child class.
Args:
field: A BigQuery SchemaField object, the google cloud bigquery Python API
representation of a column type.
Returns:
An instance of a BQType subclass that corresponds to the input type. | purplequery/bq_types.py | from_schema_field | mark-velez/purplequery | 13 | python | @classmethod
def from_schema_field(cls, field):
'Converts from a BigQuery SchemaField object to a BQType subclass.\n\n This is a factory function that constructs an object of the appropriate child class.\n\n Args:\n field: A BigQuery SchemaField object, the google cloud bigquery Python API\n representation of a column type.\n\n Returns:\n An instance of a BQType subclass that corresponds to the input type.\n '
if (field.mode in ('ARRAY', 'REPEATED')):
return BQArray(BQScalarType.from_string(field.field_type))
return BQScalarType.from_string(field.field_type) | @classmethod
def from_schema_field(cls, field):
'Converts from a BigQuery SchemaField object to a BQType subclass.\n\n This is a factory function that constructs an object of the appropriate child class.\n\n Args:\n field: A BigQuery SchemaField object, the google cloud bigquery Python API\n representation of a column type.\n\n Returns:\n An instance of a BQType subclass that corresponds to the input type.\n '
if (field.mode in ('ARRAY', 'REPEATED')):
return BQArray(BQScalarType.from_string(field.field_type))
return BQScalarType.from_string(field.field_type)<|docstring|>Converts from a BigQuery SchemaField object to a BQType subclass.
This is a factory function that constructs an object of the appropriate child class.
Args:
field: A BigQuery SchemaField object, the google cloud bigquery Python API
representation of a column type.
Returns:
An instance of a BQType subclass that corresponds to the input type.<|endoftext|> |
8bb4a90e6bb4167333bdbb667782f22cd930abae1df9fcfcf63076b8ef1e0e56 | @abstractmethod
def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n\n This abstract method always raises NotImplementedError; child classes will override\n with an appropriate implementation.\n " | Converts this type to a BigQuery SchemaField.
Args:
name: The name of the column. This class represents a type; SchemaField represents
a column, so it includes the type and also the name of the column.
Returns:
A SchemaField object corresponding to a column containing this class' type.
This abstract method always raises NotImplementedError; child classes will override
with an appropriate implementation. | purplequery/bq_types.py | to_schema_field | mark-velez/purplequery | 13 | python | @abstractmethod
def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n\n This abstract method always raises NotImplementedError; child classes will override\n with an appropriate implementation.\n " | @abstractmethod
def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n\n This abstract method always raises NotImplementedError; child classes will override\n with an appropriate implementation.\n "<|docstring|>Converts this type to a BigQuery SchemaField.
Args:
name: The name of the column. This class represents a type; SchemaField represents
a column, so it includes the type and also the name of the column.
Returns:
A SchemaField object corresponding to a column containing this class' type.
This abstract method always raises NotImplementedError; child classes will override
with an appropriate implementation.<|endoftext|> |
8bf0e145ac0425702d6e5755ee80d697949f4023c97352c9d9de01d12ee2fbd4 | @abstractmethod
def convert(self, element):
"Converts a pandas Series element to a Python type corresponding to this BigQuery type.\n\n Args:\n element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64\n rather than a Python type like int.\n\n Returns:\n The element's value, cast to a corresponding Python type.\n " | Converts a pandas Series element to a Python type corresponding to this BigQuery type.
Args:
element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64
rather than a Python type like int.
Returns:
The element's value, cast to a corresponding Python type. | purplequery/bq_types.py | convert | mark-velez/purplequery | 13 | python | @abstractmethod
def convert(self, element):
"Converts a pandas Series element to a Python type corresponding to this BigQuery type.\n\n Args:\n element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64\n rather than a Python type like int.\n\n Returns:\n The element's value, cast to a corresponding Python type.\n " | @abstractmethod
def convert(self, element):
"Converts a pandas Series element to a Python type corresponding to this BigQuery type.\n\n Args:\n element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64\n rather than a Python type like int.\n\n Returns:\n The element's value, cast to a corresponding Python type.\n "<|docstring|>Converts a pandas Series element to a Python type corresponding to this BigQuery type.
Args:
element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64
rather than a Python type like int.
Returns:
The element's value, cast to a corresponding Python type.<|endoftext|> |
46276d72653ded34bde24ec0fa88001d515ccb6eb64c6a595d6d5bb68e743ed6 | def to_dtype(self):
"Converts this BigQuery type to a NumPy dtype.\n\n Returns:\n A NumPy dtype corresponding to this BigQuery type (e.g. np.dtype('int64') for INTEGER)\n "
return np.dtype(_BQ_SCALAR_TYPE_TO_NUMPY_TYPE[self]) | Converts this BigQuery type to a NumPy dtype.
Returns:
A NumPy dtype corresponding to this BigQuery type (e.g. np.dtype('int64') for INTEGER) | purplequery/bq_types.py | to_dtype | mark-velez/purplequery | 13 | python | def to_dtype(self):
"Converts this BigQuery type to a NumPy dtype.\n\n Returns:\n A NumPy dtype corresponding to this BigQuery type (e.g. np.dtype('int64') for INTEGER)\n "
return np.dtype(_BQ_SCALAR_TYPE_TO_NUMPY_TYPE[self]) | def to_dtype(self):
"Converts this BigQuery type to a NumPy dtype.\n\n Returns:\n A NumPy dtype corresponding to this BigQuery type (e.g. np.dtype('int64') for INTEGER)\n "
return np.dtype(_BQ_SCALAR_TYPE_TO_NUMPY_TYPE[self])<|docstring|>Converts this BigQuery type to a NumPy dtype.
Returns:
A NumPy dtype corresponding to this BigQuery type (e.g. np.dtype('int64') for INTEGER)<|endoftext|> |
e2bfd521eb8f21f9b67e8b8c9f2570e5248b6f675b74d9d6759ce37b75752447 | @classmethod
def from_string(cls, typename):
'Reads this type from a string representation.\n\n A Factory method constructing an instance of BQScalarType corresponding to typename.\n The reason for not just using the inherited Enum constructor is to allow Standard BigQuery\n typenames to be aliases for Legacy typenames.\n\n Args:\n typename: A BigQuery type name, either Legacy (INTEGER, FLOAT, ...) or Standard\n (INT64, FLOAT64, ...).\n\n Returns:\n The corresponding BQScalarType enum.\n '
if isinstance(typename, str):
typename = typename.upper()
if (typename in _LEGACY_BQ_SCALAR_TYPE_FROM_BQ_SCALAR_TYPE):
return _LEGACY_BQ_SCALAR_TYPE_FROM_BQ_SCALAR_TYPE[typename]
return cls(typename) | Reads this type from a string representation.
A Factory method constructing an instance of BQScalarType corresponding to typename.
The reason for not just using the inherited Enum constructor is to allow Standard BigQuery
typenames to be aliases for Legacy typenames.
Args:
typename: A BigQuery type name, either Legacy (INTEGER, FLOAT, ...) or Standard
(INT64, FLOAT64, ...).
Returns:
The corresponding BQScalarType enum. | purplequery/bq_types.py | from_string | mark-velez/purplequery | 13 | python | @classmethod
def from_string(cls, typename):
'Reads this type from a string representation.\n\n A Factory method constructing an instance of BQScalarType corresponding to typename.\n The reason for not just using the inherited Enum constructor is to allow Standard BigQuery\n typenames to be aliases for Legacy typenames.\n\n Args:\n typename: A BigQuery type name, either Legacy (INTEGER, FLOAT, ...) or Standard\n (INT64, FLOAT64, ...).\n\n Returns:\n The corresponding BQScalarType enum.\n '
if isinstance(typename, str):
typename = typename.upper()
if (typename in _LEGACY_BQ_SCALAR_TYPE_FROM_BQ_SCALAR_TYPE):
return _LEGACY_BQ_SCALAR_TYPE_FROM_BQ_SCALAR_TYPE[typename]
return cls(typename) | @classmethod
def from_string(cls, typename):
'Reads this type from a string representation.\n\n A Factory method constructing an instance of BQScalarType corresponding to typename.\n The reason for not just using the inherited Enum constructor is to allow Standard BigQuery\n typenames to be aliases for Legacy typenames.\n\n Args:\n typename: A BigQuery type name, either Legacy (INTEGER, FLOAT, ...) or Standard\n (INT64, FLOAT64, ...).\n\n Returns:\n The corresponding BQScalarType enum.\n '
if isinstance(typename, str):
typename = typename.upper()
if (typename in _LEGACY_BQ_SCALAR_TYPE_FROM_BQ_SCALAR_TYPE):
return _LEGACY_BQ_SCALAR_TYPE_FROM_BQ_SCALAR_TYPE[typename]
return cls(typename)<|docstring|>Reads this type from a string representation.
A Factory method constructing an instance of BQScalarType corresponding to typename.
The reason for not just using the inherited Enum constructor is to allow Standard BigQuery
typenames to be aliases for Legacy typenames.
Args:
typename: A BigQuery type name, either Legacy (INTEGER, FLOAT, ...) or Standard
(INT64, FLOAT64, ...).
Returns:
The corresponding BQScalarType enum.<|endoftext|> |
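A short sketch of the legacy/standard aliasing, under the same import assumption; the lookup upper-cases its input first, and the docstring's examples suggest INT64 and FLOAT64 sit in the alias table:

from purplequery.bq_types import BQScalarType

assert BQScalarType.from_string('INTEGER') == BQScalarType.INTEGER  # legacy name
assert BQScalarType.from_string('INT64') == BQScalarType.INTEGER    # standard name aliases legacy
assert BQScalarType.from_string('float64') == BQScalarType.FLOAT    # case-insensitive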
db462ecf758b511a6a79ea0ef6d9179fa1c85bc3151d77318ea226c9eb269d05 | def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n "
return SchemaField(name=name, field_type=self.value) | Converts this type to a BigQuery SchemaField.
Args:
name: The name of the column. This class represents a type; SchemaField represents
a column, so it includes the type and also the name of the column.
Returns:
A SchemaField object corresponding to a column containing this class' type. | purplequery/bq_types.py | to_schema_field | mark-velez/purplequery | 13 | python | def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n "
return SchemaField(name=name, field_type=self.value) | def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n "
return SchemaField(name=name, field_type=self.value)<|docstring|>Converts this type to a BigQuery SchemaField.
Args:
name: The name of the column. This class represents a type; SchemaField represents
a column, so it includes the type and also the name of the column.
Returns:
A SchemaField object corresponding to a column containing this class' type.<|endoftext|> |
e927201fd0d70ddf7621d97792a50f0a2a1c358827587f1b43621cb4a685e070 | def convert(self, element):
"Converts a pandas Series element to a Python type corresponding to this BigQuery type.\n\n Args:\n element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64\n rather than a Python type like int.\n\n Returns:\n The element's value, cast to a corresponding Python type.\n "
if pd.isnull(element):
return None
return _BQ_SCALAR_TYPE_TO_PYTHON_TYPE[self](element) | Converts a pandas Series element to a Python type corresponding to this BigQuery type.
Args:
element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64
rather than a Python type like int.
Returns:
The element's value, cast to a corresponding Python type. | purplequery/bq_types.py | convert | mark-velez/purplequery | 13 | python | def convert(self, element):
"Converts a pandas Series element to a Python type corresponding to this BigQuery type.\n\n Args:\n element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64\n rather than a Python type like int.\n\n Returns:\n The element's value, cast to a corresponding Python type.\n "
if pd.isnull(element):
return None
return _BQ_SCALAR_TYPE_TO_PYTHON_TYPE[self](element) | def convert(self, element):
"Converts a pandas Series element to a Python type corresponding to this BigQuery type.\n\n Args:\n element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64\n rather than a Python type like int.\n\n Returns:\n The element's value, cast to a corresponding Python type.\n "
if pd.isnull(element):
return None
return _BQ_SCALAR_TYPE_TO_PYTHON_TYPE[self](element)<|docstring|>Converts a pandas Series element to a Python type corresponding to this BigQuery type.
Args:
element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64
rather than a Python type like int.
Returns:
The element's value, cast to a corresponding Python type.<|endoftext|> |
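For example (a sketch assuming the same import path), convert turns NumPy scalars into plain Python values and maps nulls to None:

import numpy as np
from purplequery.bq_types import BQScalarType

assert BQScalarType.INTEGER.convert(np.int64(3)) == 3   # np.int64 -> int
assert BQScalarType.INTEGER.convert(np.nan) is None     # nulls come back as None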
c333837a81b63c7113e95f28a31a924e09467885f9ee4c47aca8e21e7da2ac7f | def __new__(cls, fields, types):
'Ensures that there is only one instance of BQStructType per component type.\n\n Args:\n fields: A list of optional string field names\n types: A list of optional types.\n\n Returns:\n A singleton Struct type object containing the provided type_\n '
key = (tuple(fields), tuple(types))
if (key not in cls._STRUCT_TYPE_OBJECTS):
struct = super(BQStructType, cls).__new__(cls)
struct.__init__(fields, types)
cls._STRUCT_TYPE_OBJECTS[key] = struct
return cls._STRUCT_TYPE_OBJECTS[key] | Ensures that there is only one instance of BQStructType per component type.
Args:
fields: A list of optional string field names
types: A list of optional types.
Returns:
A singleton Struct type object containing the provided type_ | purplequery/bq_types.py | __new__ | mark-velez/purplequery | 13 | python | def __new__(cls, fields, types):
'Ensures that there is only one instance of BQStructType per component type.\n\n Args:\n fields: A list of optional string field names\n types: A list of optional types.\n\n Returns:\n A singleton Struct type object containing the provided type_\n '
key = (tuple(fields), tuple(types))
if (key not in cls._STRUCT_TYPE_OBJECTS):
struct = super(BQStructType, cls).__new__(cls)
struct.__init__(fields, types)
cls._STRUCT_TYPE_OBJECTS[key] = struct
return cls._STRUCT_TYPE_OBJECTS[key] | def __new__(cls, fields, types):
'Ensures that there is only one instance of BQStructType per component type.\n\n Args:\n fields: A list of optional string field names\n types: A list of optional types.\n\n Returns:\n A singleton Struct type object containing the provided type_\n '
key = (tuple(fields), tuple(types))
if (key not in cls._STRUCT_TYPE_OBJECTS):
struct = super(BQStructType, cls).__new__(cls)
struct.__init__(fields, types)
cls._STRUCT_TYPE_OBJECTS[key] = struct
return cls._STRUCT_TYPE_OBJECTS[key]<|docstring|>Ensures that there is only one instance of BQStructType per component type.
Args:
fields: A list of optional string field names
types: A list of optional types.
Returns:
A singleton Struct type object containing the provided type_<|endoftext|> |
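The singleton behavior can be checked directly (a sketch, same import assumptions as above):

from purplequery.bq_types import BQScalarType, BQStructType

s1 = BQStructType(['x'], [BQScalarType.INTEGER])
s2 = BQStructType(['x'], [BQScalarType.INTEGER])
assert s1 is s2  # one shared instance per (fields, types) key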
ef5378c1ce8571ee8f141054e567abe9ac82d4c083262ab005c630881ee3c1c0 | def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n "
raise NotImplementedError('SchemaField for STRUCT not implemented') | Converts this type to a BigQuery SchemaField.
Args:
name: The name of the column. This class represents a type; SchemaField represents
a column, so it includes the type and also the name of the column.
Returns:
A SchemaField object corresponding to a column containing this class' type. | purplequery/bq_types.py | to_schema_field | mark-velez/purplequery | 13 | python | def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n "
raise NotImplementedError('SchemaField for STRUCT not implemented') | def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n "
raise NotImplementedError('SchemaField for STRUCT not implemented')<|docstring|>Converts this type to a BigQuery SchemaField.
Args:
name: The name of the column. This class represents a type; SchemaField represents
a column, so it includes the type and also the name of the column.
Returns:
A SchemaField object corresponding to a column containing this class' type.<|endoftext|> |
aa3c9d2fb95ed63c6f286dcd58375110f3fe1c1b8c0b4f4092d8995b6b73d458 | def __new__(cls, type_):
'Ensures that there is only one instance of BQArray per component type.\n\n Args:\n type: A scalar type object.\n\n Returns:\n A singleton Array type object containing the provided type_\n '
if (type_ not in cls._ARRAY_TYPE_OBJECTS):
array = super(BQArray, cls).__new__(cls)
array.__init__(type_)
cls._ARRAY_TYPE_OBJECTS[type_] = array
return cls._ARRAY_TYPE_OBJECTS[type_] | Ensures that there is only one instance of BQArray per component type.
Args:
type: A scalar type object.
Returns:
A singleton Array type object containing the provided type_ | purplequery/bq_types.py | __new__ | mark-velez/purplequery | 13 | python | def __new__(cls, type_):
'Ensures that there is only one instance of BQArray per component type.\n\n Args:\n type: A scalar type object.\n\n Returns:\n A singleton Array type object containing the provided type_\n '
if (type_ not in cls._ARRAY_TYPE_OBJECTS):
array = super(BQArray, cls).__new__(cls)
array.__init__(type_)
cls._ARRAY_TYPE_OBJECTS[type_] = array
return cls._ARRAY_TYPE_OBJECTS[type_] | def __new__(cls, type_):
'Ensures that there is only one instance of BQArray per component type.\n\n Args:\n type: A scalar type object.\n\n Returns:\n A singleton Array type object containing the provided type_\n '
if (type_ not in cls._ARRAY_TYPE_OBJECTS):
array = super(BQArray, cls).__new__(cls)
array.__init__(type_)
cls._ARRAY_TYPE_OBJECTS[type_] = array
return cls._ARRAY_TYPE_OBJECTS[type_]<|docstring|>Ensures that there is only one instance of BQArray per component type.
Args:
type: A scalar type object.
Returns:
A singleton Array type object containing the provided type_<|endoftext|> |
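The same pattern holds for arrays (sketch, same assumptions):

from purplequery.bq_types import BQArray, BQScalarType

assert BQArray(BQScalarType.INTEGER) is BQArray(BQScalarType.INTEGER)
assert BQArray(BQScalarType.INTEGER) is not BQArray(BQScalarType.FLOAT)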
ba62d4d49d8158a45e8097e9a594fa2fac2ee5ee64383a6c7c744a0cfcf90049 | def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n "
if isinstance(self.type_, BQScalarType):
return SchemaField(name=name, field_type=self.type_.value, mode='REPEATED')
raise NotImplementedError('SchemaField for ARRAY of {} not implemented'.format(self.type_)) | Converts this type to a BigQuery SchemaField.
Args:
name: The name of the column. This class represents a type; SchemaField represents
a column, so it includes the type and also the name of the column.
Returns:
A SchemaField object corresponding to a column containing this class' type. | purplequery/bq_types.py | to_schema_field | mark-velez/purplequery | 13 | python | def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n "
if isinstance(self.type_, BQScalarType):
return SchemaField(name=name, field_type=self.type_.value, mode='REPEATED')
raise NotImplementedError('SchemaField for ARRAY of {} not implemented'.format(self.type_)) | def to_schema_field(self, name):
"Converts this type to a BigQuery SchemaField.\n\n Args:\n name: The name of the column. This class represents a type; SchemaField represents\n a column, so it includes the type and also the name of the column.\n\n Returns:\n A SchemaField object corresponding to a column containing this class' type.\n "
if isinstance(self.type_, BQScalarType):
return SchemaField(name=name, field_type=self.type_.value, mode='REPEATED')
raise NotImplementedError('SchemaField for ARRAY of {} not implemented'.format(self.type_))<|docstring|>Converts this type to a BigQuery SchemaField.
Args:
name: The name of the column. This class represents a type; SchemaField represents
a column, so it includes the type and also the name of the column.
Returns:
A SchemaField object corresponding to a column containing this class' type.<|endoftext|> |
b13d0da07b04efbbf46b12933bd5ca4d02c873a333cf5a91e4b5e8e26e538247 | def convert(self, element):
"Converts a pandas Series element to a Python type corresponding to this BigQuery type.\n\n Args:\n element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64\n rather than a Python type like int.\n\n Returns:\n The element's value, cast to a corresponding Python type.\n "
isnull = pd.isnull(element)
if ((not isinstance(isnull, np.ndarray)) and isnull):
return None
if (not isinstance(element, tuple)):
raise ValueError("Array typed object {!r} isn't a tuple".format(element))
return tuple((self.type_.convert(subelement) for subelement in element)) | Converts a pandas Series element to a Python type corresponding to this BigQuery type.
Args:
element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64
rather than a Python type like int.
Returns:
The element's value, cast to a corresponding Python type. | purplequery/bq_types.py | convert | mark-velez/purplequery | 13 | python | def convert(self, element):
"Converts a pandas Series element to a Python type corresponding to this BigQuery type.\n\n Args:\n element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64\n rather than a Python type like int.\n\n Returns:\n The element's value, cast to a corresponding Python type.\n "
isnull = pd.isnull(element)
if ((not isinstance(isnull, np.ndarray)) and isnull):
return None
if (not isinstance(element, tuple)):
raise ValueError("Array typed object {!r} isn't a tuple".format(element))
return tuple((self.type_.convert(subelement) for subelement in element)) | def convert(self, element):
"Converts a pandas Series element to a Python type corresponding to this BigQuery type.\n\n Args:\n element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64\n rather than a Python type like int.\n\n Returns:\n The element's value, cast to a corresponding Python type.\n "
isnull = pd.isnull(element)
if ((not isinstance(isnull, np.ndarray)) and isnull):
return None
if (not isinstance(element, tuple)):
raise ValueError("Array typed object {!r} isn't a tuple".format(element))
return tuple((self.type_.convert(subelement) for subelement in element))<|docstring|>Converts a pandas Series element to a Python type corresponding to this BigQuery type.
Args:
element: One cell of a Pandas DataFrame or Series. Will have numpy types like np.int64
rather than a Python type like int.
Returns:
The element's value, cast to a corresponding Python type.<|endoftext|> |
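A hedged sketch of array conversion: nulls pass through as None, tuples are converted element-wise, and anything else is rejected:

import numpy as np
from purplequery.bq_types import BQArray, BQScalarType

floats = BQArray(BQScalarType.FLOAT)
assert floats.convert((np.float64(1.5), np.float64(2.5))) == (1.5, 2.5)
assert floats.convert(None) is None
try:
    floats.convert([1.5, 2.5])  # lists are not accepted, only tuples
except ValueError as e:
    print(e)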
09419861c7f8d1a5908ab32368fe0927e34a7e458d0e81e8ed00befc8ba50759 | @property
def series(self):
'Returns just the column of data.'
return self._series | Returns just the column of data. | purplequery/bq_types.py | series | mark-velez/purplequery | 13 | python | @property
def series(self):
return self._series | @property
def series(self):
return self._series<|docstring|>Returns just the column of data.<|endoftext|> |
aefa02ce05f9a7cfc57ce227b355f98e3d716818f4216275adc1f9ffe0f7598f | @property
def type_(self):
'Returns just the type of the data.'
return self._type | Returns just the type of the data. | purplequery/bq_types.py | type_ | mark-velez/purplequery | 13 | python | @property
def type_(self):
return self._type | @property
def type_(self):
return self._type<|docstring|>Returns just the type of the data.<|endoftext|> |
d3d218098dbe7c435cdd14c2ec4a1fbb3cd688c350ac54514415c533f531b6a3 | @property
def dataframe(self):
'Returns the column of data cast to a one-column table.'
return pd.DataFrame(self.series) | Returns the column of data cast to a one-column table. | purplequery/bq_types.py | dataframe | mark-velez/purplequery | 13 | python | @property
def dataframe(self):
return pd.DataFrame(self.series) | @property
def dataframe(self):
return pd.DataFrame(self.series)<|docstring|>Returns the column of data cast to a one-column table.<|endoftext|> |
051e1697466daf9b00818c3ab98d0138c81a5c9c745a01cea7c4f7afc71b5abc | @property
def types(self):
'Returns the data type cast to a one-element list.'
return [self.type_] | Returns the data type cast to a one-element list. | purplequery/bq_types.py | types | mark-velez/purplequery | 13 | python | @property
def types(self):
return [self.type_] | @property
def types(self):
return [self.type_]<|docstring|>Returns the data type cast to a one-element list.<|endoftext|> |
4da8df884a02f6e9254761305af66d00d7bd446eaaf65232111699b622ce76d8 | def to_list(self):
'Returns the column as a list of Python-typed objects.'
return [self.type_.convert(element) for element in self.series] | Returns the column as a list of Python-typed objects. | purplequery/bq_types.py | to_list | mark-velez/purplequery | 13 | python | def to_list(self):
return [self.type_.convert(element) for element in self.series] | def to_list(self):
return [self.type_.convert(element) for element in self.series]<|docstring|>Returns the column as a list of Python-typed objects.<|endoftext|> |
7c2732e81757907e4c610f18e42df33a15aadf994dca9f4b7253be16c97e4eb7 | @property
def dataframe(self):
"Returns the underlying DataFrame.\n\n This is a property so that it's immutable.\n\n Returns:\n The actual tabular data.\n "
return self._dataframe | Returns the underlying DataFrame.
This is a property so that it's immutable.
Returns:
The actual tabular data. | purplequery/bq_types.py | dataframe | mark-velez/purplequery | 13 | python | @property
def dataframe(self):
"Returns the underlying DataFrame.\n\n This is a property so that it's immutable.\n\n Returns:\n The actual tabular data.\n "
return self._dataframe | @property
def dataframe(self):
"Returns the underlying DataFrame.\n\n This is a property so that it's immutable.\n\n Returns:\n The actual tabular data.\n "
return self._dataframe<|docstring|>Returns the underlying DataFrame.
This is a property so that it's immutable.
Returns:
The actual tabular data.<|endoftext|> |
1d170eeed407181132a6d34afa4b03451fdb5f7d6703a0a6a81e2367778286d1 | @property
def types(self):
"Returns the data types, in the same order as the table's columns."
return self._types | Returns the data types, in the same order as the table's columns. | purplequery/bq_types.py | types | mark-velez/purplequery | 13 | python | @property
def types(self):
return self._types | @property
def types(self):
return self._types<|docstring|>Returns the data types, in the same order as the table's columns.<|endoftext|> |
54a4c845fb36f600ff5b3afa324b86a796b65d7c94c6f089c4b4de07c80c8cb7 | def to_bq_schema(self):
"Returns a BigQuery schema (list of schema fields) matching this object's types."
return [type_.to_schema_field(name) for (name, type_) in zip(self.dataframe.columns, self.types)] | Returns a BigQuery schema (list of schema fields) matching this object's types. | purplequery/bq_types.py | to_bq_schema | mark-velez/purplequery | 13 | python | def to_bq_schema(self):
return [type_.to_schema_field(name) for (name, type_) in zip(self.dataframe.columns, self.types)] | def to_bq_schema(self):
return [type_.to_schema_field(name) for (name, type_) in zip(self.dataframe.columns, self.types)]<|docstring|>Returns a BigQuery schema (list of schema fields) matching this object's types.<|endoftext|> |
031934e413055d1d5c0a5de73405f21bd0e09204aea87338e30d7caa3e78c4d1 | def to_list_of_lists(self):
'Returns the data as a list of rows, each row a list of Python-typed objects.'
rows = []
for (unused_index, row) in self.dataframe.iterrows():
rows.append([type_.convert(element) for (element, type_) in zip(list(row), self.types)])
return rows | Returns the data as a list of rows, each row a list of Python-typed objects. | purplequery/bq_types.py | to_list_of_lists | mark-velez/purplequery | 13 | python | def to_list_of_lists(self):
rows = []
for (unused_index, row) in self.dataframe.iterrows():
rows.append([type_.convert(element) for (element, type_) in zip(list(row), self.types)])
return rows | def to_list_of_lists(self):
rows = []
for (unused_index, row) in self.dataframe.iterrows():
rows.append([type_.convert(element) for (element, type_) in zip(list(row), self.types)])
return rows<|docstring|>Returns the data as a list of rows, each row a list of Python-typed objects.<|endoftext|> |
e34792f137f5ff58bd468eba96dbaee529bd1090dd3f36f5bc5cfe64b0e2f116 | def __init__(self, default_db, sql):
"Constructor.\n\n Args:\n default_db (str): Default database's name.\n sql (str): One sql text.\n\n "
self.sql = sql
self.default_db = default_db | Constructor.
Args:
default_db (str): Default database's name.
sql (str): One sql text. | sqlextractor/SqlExtractor.py | __init__ | StefanLim0/mysql-er | 8 | python | def __init__(self, default_db, sql):
"Constructor.\n\n Args:\n default_db (str): Default database's name.\n sql (str): One sql text.\n\n "
self.sql = sql
self.default_db = default_db | def __init__(self, default_db, sql):
"Constructor.\n\n Args:\n default_db (str): Default database's name.\n sql (str): One sql text.\n\n "
self.sql = sql
self.default_db = default_db<|docstring|>Constructor.
Args:
default_db (str): Default database's name.
sql (str): One sql text.<|endoftext|> |
8b5f11f19830731e1690c15ed7b5171b34c1ec4d0362ebd323237fe5c79386ea | def get_sqls(self):
"This function extracts sqls.\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
return [] | This function extracts sqls.
Returns:
A list of :class:`SQL`. For example:
[SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')] | sqlextractor/SqlExtractor.py | get_sqls | StefanLim0/mysql-er | 8 | python | def get_sqls(self):
"This function extracts sqls.\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL(, u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
return [] | def get_sqls(self):
"This function extracts sqls.\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL(, u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
return []<|docstring|>This function extracts sqls.
Returns:
A list of :class:`SQL`. For example:
[SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]<|endoftext|> |
c028352f37e2bba15bd4fbfb7a9d2df3bca1753916065046e6976f4601555a14 | def __init__(self, path):
'Constructor.\n\n Args:\n path (str): File path.\n\n '
self.path = path | Constructor.
Args:
path (str): File path. | sqlextractor/SqlExtractor.py | __init__ | StefanLim0/mysql-er | 8 | python | def __init__(self, path):
'Constructor.\n\n Args:\n path (str): File path.\n\n '
self.path = path | def __init__(self, path):
'Constructor.\n\n Args:\n path (str): File path.\n\n '
self.path = path<|docstring|>Constructor.
Args:
path (str): File path.<|endoftext|> |
d979a745fb91416d1a187917f22444cd65ea3a625b27af9cfad0ebf15c7f73e5 | def get_sqls(self):
"This function extracts sqls from the text file.\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
with codecs.open(self.path, 'r', 'utf-8') as f:
return filter((lambda _: (_ != '')), [SQL('', _.strip()) for _ in f.read().split(';')]) | This function extracts sqls from the text file.
Returns:
A list of :class:`SQL`. For example:
[SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')] | sqlextractor/SqlExtractor.py | get_sqls | StefanLim0/mysql-er | 8 | python | def get_sqls(self):
"This function extracts sqls from the text file.\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL(, u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
with codecs.open(self.path, 'r', 'utf-8') as f:
return filter((lambda _: (_ != )), [SQL(, _.strip()) for _ in f.read().split(';')]) | def get_sqls(self):
"This function extracts sqls from the text file.\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL(, u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
with codecs.open(self.path, 'r', 'utf-8') as f:
return filter((lambda _: (_ != )), [SQL(, _.strip()) for _ in f.read().split(';')])<|docstring|>This function extracts sqls from the text file.
Returns:
A list of :class:`SQL`. For example:
[SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]<|endoftext|> |
b7dc386c3ee6ba99da9bcd79d38d2c201f95cef3d90c569c6131227b8f899684 | def __init__(self, dir, encoding='utf-8'):
'Constructor.\n\n Args:\n dir (str): directory path.\n\n Kwargs:\n encoding (str): file encoding type, e.g., *utf-8*\n\n '
self.dir = dir
self.encoding = encoding | Constructor.
Args:
dir (str): directory path.
Kwargs:
encoding (str): file encoding type, e.g., *utf-8* | sqlextractor/SqlExtractor.py | __init__ | StefanLim0/mysql-er | 8 | python | def __init__(self, dir, encoding='utf-8'):
'Constructor.\n\n Args:\n dir (str): directory path.\n\n Kwargs:\n encoding (str): file encoding type, e.g., *utf-8*\n\n '
self.dir = dir
self.encoding = encoding | def __init__(self, dir, encoding='utf-8'):
'Constructor.\n\n Args:\n dir (str): directory path.\n\n Kwargs:\n encoding (str): file encoding type, e.g., *utf-8*\n\n '
self.dir = dir
self.encoding = encoding<|docstring|>Constructor.
Args:
dir (str): directory path.
Kwargs:
encoding (str): file encoding type, e.g., *utf-8*<|endoftext|> |
8da4aa55ca039fd48b51d8c08c8d9c33038bd80679cde6238f6688443bb01566 | def get_sqls(self):
"This function extracts sqls from the java files with mybatis sqls.\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
sqls = []
for (root, dirs, files) in os.walk(self.dir):
for file in files:
if (not file.endswith('.java')):
continue
with codecs.open(os.path.join(root, file), 'r', encoding=self.encoding) as f:
sqls.extend(MybatisInlineSqlExtractor.get_selects_from_text(MybatisInlineSqlExtractor.remove_comment(f.read())))
return sqls | This function extracts sqls from the java files with mybatis sqls.
Returns:
A list of :class:`SQL`. For example:
[SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')] | sqlextractor/SqlExtractor.py | get_sqls | StefanLim0/mysql-er | 8 | python | def get_sqls(self):
"This function extracts sqls from the java files with mybatis sqls.\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL(, u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
sqls = []
for (root, dirs, files) in os.walk(self.dir):
for file in files:
if (not file.endswith('.java')):
continue
with codecs.open(os.path.join(root, file), 'r', encoding=self.encoding) as f:
sqls.extend(MybatisInlineSqlExtractor.get_selects_from_text(MybatisInlineSqlExtractor.remove_comment(f.read())))
return sqls | def get_sqls(self):
"This function extracts sqls from the java files with mybatis sqls.\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL(, u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
sqls = []
for (root, dirs, files) in os.walk(self.dir):
for file in files:
if (not file.endswith('.java')):
continue
with codecs.open(os.path.join(root, file), 'r', encoding=self.encoding) as f:
sqls.extend(MybatisInlineSqlExtractor.get_selects_from_text(MybatisInlineSqlExtractor.remove_comment(f.read())))
return sqls<|docstring|>This function extracts sqls from the java files with mybatis sqls.
Returns:
A list of :class:`SQL`. For example:
[SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]<|endoftext|> |
2aea7a148ca596e961fa4be4c2dc0b2e4d329194b63ba7bf3b52c2b449e9976a | def __init__(self, log_path):
'Constructor.\n\n Args:\n log_path (str): mysql general log file path.\n\n '
self.log_path = log_path | Constructor.
Args:
log_path (str): mysql general log file path. | sqlextractor/SqlExtractor.py | __init__ | StefanLim0/mysql-er | 8 | python | def __init__(self, log_path):
'Constructor.\n\n Args:\n log_path (str): mysql general log file path.\n\n '
self.log_path = log_path | def __init__(self, log_path):
'Constructor.\n\n Args:\n log_path (str): mysql general log file path.\n\n '
self.log_path = log_path<|docstring|>Constructor.
Args:
log_path (str): mysql general log file path.<|endoftext|> |
36bba749ea01bb8ad83a6b2a066f0464464a507d917115cfc762e10c0ef88291 | def get_sqls(self):
"This function extracts sqls from mysql general log file.\n\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
general_log = open(self.log_path)
log = GeneralQueryLog(general_log)
session_db_map = {}
sqls = []
for entry in log:
if (entry['command'] == 'Connect'):
m = re.search('\\s+on\\s(?P<name>\\w+)', entry['argument'])
if m:
session_db_map[entry['session_id']] = m.groupdict()['name'].strip()
elif (entry['command'] == 'Init DB'):
session_db_map[entry['session_id']] = entry['argument'].strip()
elif (entry['command'] == 'Query'):
sql = entry['argument']
if (sql.strip()[:6].lower() == 'select'):
(yield SQL(session_db_map.get(entry['session_id'], ''), sql)) | This function extracts sqls from mysql general log file.
Returns:
A list of :class:`SQL`. For example:
[SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')] | sqlextractor/SqlExtractor.py | get_sqls | StefanLim0/mysql-er | 8 | python | def get_sqls(self):
"This function extracts sqls from mysql general log file.\n\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL(, u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
general_log = open(self.log_path)
log = GeneralQueryLog(general_log)
session_db_map = {}
sqls = []
for entry in log:
if (entry['command'] == 'Connect'):
m = re.search('\\s+on\\s(?P<name>\\w+)', entry['argument'])
if m:
session_db_map[entry['session_id']] = m.groupdict()['name'].strip()
elif (entry['command'] == 'Init DB'):
session_db_map[entry['session_id']] = entry['argument'].strip()
elif (entry['command'] == 'Query'):
sql = entry['argument']
if (sql.strip()[:6].lower() == 'select'):
(yield SQL(session_db_map.get(entry['session_id'], ), sql)) | def get_sqls(self):
"This function extracts sqls from mysql general log file.\n\n\n Returns:\n A list of :class:`SQL`. For example:\n [SQL(, u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]\n\n "
general_log = open(self.log_path)
log = GeneralQueryLog(general_log)
session_db_map = {}
sqls = []
for entry in log:
if (entry['command'] == 'Connect'):
m = re.search('\\s+on\\s(?P<name>\\w+)', entry['argument'])
if m:
session_db_map[entry['session_id']] = m.groupdict()['name'].strip()
elif (entry['command'] == 'Init DB'):
session_db_map[entry['session_id']] = entry['argument'].strip()
elif (entry['command'] == 'Query'):
sql = entry['argument']
if (sql.strip()[:6].lower() == 'select'):
(yield SQL(session_db_map.get(entry['session_id'], ), sql))<|docstring|>This function extracts sqls from mysql general log file.
Returns:
A list of :class:`SQL`. For example:
[SQL('', u'select a.id, b.name from db.ac a join db.bc b on a.id=b.id or a.id=b.iid where a.cnt > 10')]<|endoftext|> |
096fb0ed7032c759166ab34b21f9b6a4fc3f8ac12c3864efab7203a7b478af6d | def is_anagram(s, t):
'\n Using a hash table, no sorting necessary. We iterate on the two\n strings only, the time complexity is 2N -> N, but you need memory for\n the hash table.\n\n Returns true or false\n '
ht = {}
if ((s is None) or (t is None) or (len(s) != len(t))):
return False
for c in s:
times = ht.setdefault(c, 0)
ht[c] = (times + 1)
for c in t:
if (not ht.has_key(c)):
return False
elif (ht[c] > 1):
ht[c] = (ht[c] - 1)
else:
ht.pop(c)
return (ht.keys() == []) | Using a hash table, no sorting necessary. We iterate on the two
strings only, the time complexity is 2N -> N, but you need memory for
the hash table.
Returns true or false | anagram.py | is_anagram | gl051/unit-test-demo | 0 | python | def is_anagram(s, t):
'\n Using a hash table, no sorting necessary. We iterate on the two\n strings only, the time complexity is 2N -> N, but you need memory for\n the hash table.\n\n Returns true or false\n '
ht = {}
if ((s is None) or (t is None) or (len(s) != len(t))):
return False
for c in s:
times = ht.setdefault(c, 0)
ht[c] = (times + 1)
for c in t:
if (not ht.has_key(c)):
return False
elif (ht[c] > 1):
ht[c] = (ht[c] - 1)
else:
ht.pop(c)
return (ht.keys() == []) | def is_anagram(s, t):
'\n Using a hash table, no sorting necessary. We iterate on the two\n strings only, the time complexity is 2N -> N, but you need memory for\n the hash table.\n\n Returns true or false\n '
ht = {}
if ((s is None) or (t is None) or (len(s) != len(t))):
return False
for c in s:
times = ht.setdefault(c, 0)
ht[c] = (times + 1)
for c in t:
if (not ht.has_key(c)):
return False
elif (ht[c] > 1):
ht[c] = (ht[c] - 1)
else:
ht.pop(c)
return (ht.keys() == [])<|docstring|>Using a hash table, no sorting necessary. We iterate on the two
strings only, the time complexity is 2N -> N, but you need memory for
the hash table.
Returns true or false<|endoftext|> |
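The function above is Python 2 code (dict.has_key and a list-returning keys()); the same hash-table idea in idiomatic Python 3, as a self-contained sketch for comparison:

from collections import Counter

def is_anagram_py3(s, t):
    # Count characters of both strings; equal multisets means anagram.
    if s is None or t is None or len(s) != len(t):
        return False
    return Counter(s) == Counter(t)

assert is_anagram_py3('listen', 'silent')
assert not is_anagram_py3('apple', 'apples')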
fcaef34e9cb398667e48bb9e9b562a447737a37f141e3ca0d96f6c701e7381b2 | def __init__(self):
'Classify Tool'
self.label = '3 Finding Centers and Classification Tool'
self.description = 'Find Centers and Classify for HiSpatialCluster.'
self.canRunInBackground = True
self.cntr_addr = ''
self.cls_addr = '' | Classify Tool | tool_clswithcntr.py | __init__ | lopp2005/HiSpatialCluster | 32 | python | def __init__(self):
self.label = '3 Finding Centers and Classification Tool'
self.description = 'Find Centers and Classify for HiSpatialCluster.'
self.canRunInBackground = True
self.cntr_addr =
self.cls_addr = | def __init__(self):
self.label = '3 Finding Centers and Classification Tool'
self.description = 'Find Centers and Classify for HiSpatialCluster.'
self.canRunInBackground = True
self.cntr_addr =
self.cls_addr = <|docstring|>Classify Tool<|endoftext|> |
97b0c2cb0daaa2403efd6de0ee18d58e98d0a01affb71894c1720006df19a844 | def getParameterInfo(self):
'Define parameter definitions'
paraminput = Parameter(displayName='Input Points', name='in_points', datatype='DEFeatureClass', parameterType='Required', direction='Input')
paraminput.filter.list = ['Point']
paramidfield = Parameter(displayName='Identifier Field', name='id_field', datatype='Field', parameterType='Required', direction='Input')
paramidfield.parameterDependencies = [paraminput.name]
paramidfield.filter.list = ['Short', 'Long']
paramparentidfield = Parameter(displayName='Parent ID Field', name='parent_id_field', datatype='Field', parameterType='Required', direction='Input')
paramparentidfield.parameterDependencies = [paraminput.name]
paramparentidfield.filter.list = ['Short', 'Long']
paramparentidfield.value = 'PARENTID'
parammultifield = Parameter(displayName='Multiply Field', name='multi_field', datatype='Field', parameterType='Required', direction='Input')
parammultifield.filter.list = ['Short', 'Long', 'Float', 'Single', 'Double']
parammultifield.parameterDependencies = [paraminput.name]
parammultifield.value = 'MULTIPLY'
paramcntroutput = Parameter(displayName='Output Center Points', name='out_cntr_points', datatype='DEFeatureClass', parameterType='Required', direction='Output')
paramclsoutput = Parameter(displayName='Output Classified Points', name='out_cls_points', datatype='DEFeatureClass', parameterType='Required', direction='Output')
paramcntrnum = Parameter(displayName='Maximum Number of Center Points', name='cntr_num', datatype='GPLong', parameterType='Required', direction='Input')
paramcntrnum.value = 100
params = [paraminput, paramidfield, paramparentidfield, parammultifield, paramcntrnum, paramcntroutput, paramclsoutput]
return params | Define parameter definitions | tool_clswithcntr.py | getParameterInfo | lopp2005/HiSpatialCluster | 32 | python | def getParameterInfo(self):
paraminput = Parameter(displayName='Input Points', name='in_points', datatype='DEFeatureClass', parameterType='Required', direction='Input')
paraminput.filter.list = ['Point']
paramidfield = Parameter(displayName='Identifier Field', name='id_field', datatype='Field', parameterType='Required', direction='Input')
paramidfield.parameterDependencies = [paraminput.name]
paramidfield.filter.list = ['Short', 'Long']
paramparentidfield = Parameter(displayName='Parent ID Field', name='parent_id_field', datatype='Field', parameterType='Required', direction='Input')
paramparentidfield.parameterDependencies = [paraminput.name]
paramparentidfield.filter.list = ['Short', 'Long']
paramparentidfield.value = 'PARENTID'
parammultifield = Parameter(displayName='Multiply Field', name='multi_field', datatype='Field', parameterType='Required', direction='Input')
parammultifield.filter.list = ['Short', 'Long', 'Float', 'Single', 'Double']
parammultifield.parameterDependencies = [paraminput.name]
parammultifield.value = 'MULTIPLY'
paramcntroutput = Parameter(displayName='Output Center Points', name='out_cntr_points', datatype='DEFeatureClass', parameterType='Required', direction='Output')
paramclsoutput = Parameter(displayName='Output Classified Points', name='out_cls_points', datatype='DEFeatureClass', parameterType='Required', direction='Output')
paramcntrnum = Parameter(displayName='Maximum Number of Center Points', name='cntr_num', datatype='GPLong', parameterType='Required', direction='Input')
paramcntrnum.value = 100
params = [paraminput, paramidfield, paramparentidfield, parammultifield, paramcntrnum, paramcntroutput, paramclsoutput]
return params | def getParameterInfo(self):
paraminput = Parameter(displayName='Input Points', name='in_points', datatype='DEFeatureClass', parameterType='Required', direction='Input')
paraminput.filter.list = ['Point']
paramidfield = Parameter(displayName='Identifier Field', name='id_field', datatype='Field', parameterType='Required', direction='Input')
paramidfield.parameterDependencies = [paraminput.name]
paramidfield.filter.list = ['Short', 'Long']
paramparentidfield = Parameter(displayName='Parent ID Field', name='parent_id_field', datatype='Field', parameterType='Required', direction='Input')
paramparentidfield.parameterDependencies = [paraminput.name]
paramparentidfield.filter.list = ['Short', 'Long']
paramparentidfield.value = 'PARENTID'
parammultifield = Parameter(displayName='Multiply Field', name='multi_field', datatype='Field', parameterType='Required', direction='Input')
parammultifield.filter.list = ['Short', 'Long', 'Float', 'Single', 'Double']
parammultifield.parameterDependencies = [paraminput.name]
parammultifield.value = 'MULTIPLY'
paramcntroutput = Parameter(displayName='Output Center Points', name='out_cntr_points', datatype='DEFeatureClass', parameterType='Required', direction='Output')
paramclsoutput = Parameter(displayName='Output Classified Points', name='out_cls_points', datatype='DEFeatureClass', parameterType='Required', direction='Output')
paramcntrnum = Parameter(displayName='Maximum Number of Center Points', name='cntr_num', datatype='GPLong', parameterType='Required', direction='Input')
paramcntrnum.value = 100
params = [paraminput, paramidfield, paramparentidfield, parammultifield, paramcntrnum, paramcntroutput, paramclsoutput]
return params<|docstring|>Define parameter definitions<|endoftext|> |
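
For context, a sketch of how a toolbox's execute() typically consumes the parameter list defined above. The method below is hypothetical (it is not part of the record) and assumes an ArcGIS Python environment; the indices follow the order of the returned params list.

def execute(self, parameters, messages):
    # positional access mirrors [paraminput, paramidfield, paramparentidfield,
    # parammultifield, paramcntrnum, paramcntroutput, paramclsoutput]
    in_points = parameters[0].valueAsText
    id_field = parameters[1].valueAsText
    parent_id_field = parameters[2].valueAsText
    multi_field = parameters[3].valueAsText
    cntr_num = parameters[4].value
    out_cntr_points = parameters[5].valueAsText
    out_cls_points = parameters[6].valueAsText
    messages.addMessage('generating up to {0} centers'.format(cntr_num))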
043c2ac87cf9983ea4de3170b83baf58675c6dff59b3a0d7b1ea99b75cbba825 | def register_plugin(collectd):
'Bind plugin hooks to collectd and vice versa'
config = Config.instance()
log_handler = CollectdLogHandler(collectd=collectd, config=config)
ROOT_LOGGER.addHandler(log_handler)
ROOT_LOGGER.setLevel(logging.DEBUG)
instance = Plugin(collectd=collectd, config=config)
collectd.register_init(instance.init)
collectd.register_config(instance.config)
collectd.register_write(instance.write)
collectd.register_shutdown(instance.shutdown) | Bind plugin hooks to collectd and vice versa | collectd_openstack/gnocchi/plugin.py | register_plugin | ChameleonCloud/collectd-ceilometer-plugin | 0 | python | def register_plugin(collectd):
config = Config.instance()
log_handler = CollectdLogHandler(collectd=collectd, config=config)
ROOT_LOGGER.addHandler(log_handler)
ROOT_LOGGER.setLevel(logging.DEBUG)
instance = Plugin(collectd=collectd, config=config)
collectd.register_init(instance.init)
collectd.register_config(instance.config)
collectd.register_write(instance.write)
collectd.register_shutdown(instance.shutdown) | def register_plugin(collectd):
config = Config.instance()
log_handler = CollectdLogHandler(collectd=collectd, config=config)
ROOT_LOGGER.addHandler(log_handler)
ROOT_LOGGER.setLevel(logging.DEBUG)
instance = Plugin(collectd=collectd, config=config)
collectd.register_init(instance.init)
collectd.register_config(instance.config)
collectd.register_write(instance.write)
collectd.register_shutdown(instance.shutdown)<|docstring|>Bind plugin hooks to collectd and vice versa<|endoftext|>
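
To see the hook wiring without a running collectd daemon, a throwaway stand-in can be handed to register_plugin. FakeCollectd below is hypothetical scaffolding that only mimics the registration functions the real collectd embedder exposes; Config, CollectdLogHandler and Plugin still come from the collectd_openstack package itself.

class FakeCollectd(object):
    # minimal stand-in mimicking the collectd python embedder's hook API
    def info(self, msg):
        print('INFO: {0}'.format(msg))

    def register_init(self, cb):
        print('init hook -> {0}'.format(cb.__name__))

    def register_config(self, cb):
        print('config hook -> {0}'.format(cb.__name__))

    def register_write(self, cb):
        print('write hook -> {0}'.format(cb.__name__))

    def register_shutdown(self, cb):
        print('shutdown hook -> {0}'.format(cb.__name__))

# register_plugin(FakeCollectd())  # uncommenting still requires collectd_openstack importable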
e12f29709a7d9c396fa055ca6d682fc8bbb38b37ce1f33b38178e9db90446b66 | def config(self, cfg):
'Configuration callback\n\n @param cfg configuration node provided by collectd\n '
self._config.read(cfg) | Configuration callback
@param cfg configuration node provided by collectd | collectd_openstack/gnocchi/plugin.py | config | ChameleonCloud/collectd-ceilometer-plugin | 0 | python | def config(self, cfg):
'Configuration callback\n\n @param cfg configuration node provided by collectd\n '
self._config.read(cfg) | def config(self, cfg):
'Configuration callback\n\n @param cfg configuration node provided by collectd\n '
self._config.read(cfg)<|docstring|>Configuration callback
@param cfg configuration node provided by collectd<|endoftext|> |
84e9d0dc7c4c164bdc290cf4aa9ea1a9e73c2a8e41d3342dbe45ae4a860d6604 | def init(self):
'Initialization callback'
collectd.info('Initializing the collectd OpenStack python plugin')
self._meters = MeterStorage(collectd=collectd) | Initialization callback | collectd_openstack/gnocchi/plugin.py | init | ChameleonCloud/collectd-ceilometer-plugin | 0 | python | def init(self):
collectd.info('Initializing the collectd OpenStack python plugin')
self._meters = MeterStorage(collectd=collectd) | def init(self):
collectd.info('Initializing the collectd OpenStack python plugin')
self._meters = MeterStorage(collectd=collectd)<|docstring|>Initialization callback<|endoftext|> |
d39787e409874eef359b04a3cdcd7c767c7bc2eaadc99fbffa0503da21eb89f1 | def write(self, vl, data=None):
'Collectd write callback'
self._writer.write(vl, data) | Collectd write callback | collectd_openstack/gnocchi/plugin.py | write | ChameleonCloud/collectd-ceilometer-plugin | 0 | python | def write(self, vl, data=None):
self._writer.write(vl, data) | def write(self, vl, data=None):
self._writer.write(vl, data)<|docstring|>Collectd write callback<|endoftext|> |
573f2fda9b657da9b9ca2eb28904508e9f3558e9bd2fcb67f6d14e10a5b99658 | def shutdown(self):
'Shutdown callback'
LOGGER.info('SHUTDOWN')
self._writer.flush() | Shutdown callback | collectd_openstack/gnocchi/plugin.py | shutdown | ChameleonCloud/collectd-ceilometer-plugin | 0 | python | def shutdown(self):
LOGGER.info('SHUTDOWN')
self._writer.flush() | def shutdown(self):
LOGGER.info('SHUTDOWN')
self._writer.flush()<|docstring|>Shutdown callback<|endoftext|> |
14406d859f77d4961dfdc2c8025081debf8e5c42ed4d0a5f266949de247ebed6 | def filter_away_extreme_slopes(input_data, slope_col, max_slope, filter_width=0):
'\n Filter away all rows where the absolute slope exceeds max_slope, plus surrounding entries\n :type input_data: numpy.ndarray\n :type slope_col: int or slice or list[int]\n :type max_slope: float\n :type filter_width: int\n :return:\n '
import numpy as np
indices = np.less(np.abs(input_data[:, slope_col]), max_slope)
widened_indices = np.convolve(np.reshape(indices, len(indices)), np.ones(filter_width, dtype=bool), mode='same')
return input_data[widened_indices, :] | Filter away all rows where the absolute slope exceeds max_slope, plus surrounding entries
:type input_data: numpy.ndarray
:type slope_col: int or slice or list[int]
:type max_slope: float
:type filter_width: int
:return: | ModelPredictiveControl/prune_output_matrices.py | filter_away_extreme_slopes | EWannerberg/AutomaticHeuristicGeneration | 0 | python | def filter_away_extreme_slopes(input_data, slope_col, max_slope, filter_width=0):
'\n Filter away all rows where the absolute slope exceeds max_slope, plus surrounding entries\n :type input_data: numpy.ndarray\n :type slope_col: int or slice or list[int]\n :type max_slope: float\n :type filter_width: int\n :return:\n '
import numpy as np
indices = np.less(np.abs(input_data[:, slope_col]), max_slope)
widened_indices = np.convolve(np.reshape(indices, len(indices)), np.ones(filter_width, dtype=bool), mode='same')
return input_data[widened_indices, :] | def filter_away_extreme_slopes(input_data, slope_col, max_slope, filter_width=0):
'\n Filter away all rows where the absolute slope exceeds max_slope, plus surrounding entries\n :type input_data: numpy.ndarray\n :type slope_col: int or slice or list[int]\n :type max_slope: float\n :type filter_width: int\n :return:\n '
import numpy as np
indices = np.less(np.abs(input_data[:, slope_col]), max_slope)
widened_indices = np.convolve(np.reshape(indices, len(indices)), np.ones(filter_width, dtype=bool), mode='same')
return input_data[widened_indices, :]<|docstring|>Filter away all rows where the absolute slope exceeds max_slope, plus surrounding entries
:type input_data: numpy.ndarray
:type slope_col: int or slice or list[int]
:type max_slope: float
:type filter_width: int
:return:<|endoftext|> |
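
A minimal sketch exercising filter_away_extreme_slopes; it assumes the record's function is importable. The two-column layout (slope in column 1), the threshold and filter_width=1 are assumptions chosen for illustration; note that the default filter_width=0 would hand np.convolve an empty kernel and raise.

import numpy as np

data = np.array([
    [0.0, 0.1],
    [1.0, 5.0],   # |slope| above threshold, dropped
    [2.0, 0.2],
    [3.0, 0.3],
    [4.0, -6.0],  # dropped as well
    [5.0, 0.1],
])
kept = filter_away_extreme_slopes(data, slope_col=1, max_slope=1.0, filter_width=1)
print(kept)  # rows 0, 2, 3 and 5 remain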
efe4de9c8c58bbfc92baeea323f6336ea02164075b2e33e2b8ba054788749f7f | @callback
def _create_abort_data(flow_id: str, handler: str, reason: str, description_placeholders: Optional[Dict]=None) -> Dict[(str, Any)]:
'Return the definition of an abort result for the flow.'
return {'type': RESULT_TYPE_ABORT, 'flow_id': flow_id, 'handler': handler, 'reason': reason, 'description_placeholders': description_placeholders} | Return the definition of an abort result for the flow. | homeassistant/data_entry_flow.py | _create_abort_data | mockersf/home-assistant | 23 | python | @callback
def _create_abort_data(flow_id: str, handler: str, reason: str, description_placeholders: Optional[Dict]=None) -> Dict[(str, Any)]:
return {'type': RESULT_TYPE_ABORT, 'flow_id': flow_id, 'handler': handler, 'reason': reason, 'description_placeholders': description_placeholders} | @callback
def _create_abort_data(flow_id: str, handler: str, reason: str, description_placeholders: Optional[Dict]=None) -> Dict[(str, Any)]:
return {'type': RESULT_TYPE_ABORT, 'flow_id': flow_id, 'handler': handler, 'reason': reason, 'description_placeholders': description_placeholders}<|docstring|>Return the definition of an abort result for the flow.<|endoftext|>
fc6b1bf0892aa1de70b4e7486dcd26f7a730dd8c2a9e6c821918bb7a89ab3cbe | def __init__(self, reason: str, description_placeholders: Optional[Dict]=None):
'Initialize an abort flow exception.'
super().__init__(f'Flow aborted: {reason}')
self.reason = reason
self.description_placeholders = description_placeholders | Initialize an abort flow exception. | homeassistant/data_entry_flow.py | __init__ | mockersf/home-assistant | 23 | python | def __init__(self, reason: str, description_placeholders: Optional[Dict]=None):
super().__init__(f'Flow aborted: {reason}')
self.reason = reason
self.description_placeholders = description_placeholders | def __init__(self, reason: str, description_placeholders: Optional[Dict]=None):
super().__init__(f'Flow aborted: {reason}')
self.reason = reason
self.description_placeholders = description_placeholders<|docstring|>Initialize an abort flow exception.<|endoftext|> |
9fa342b976bcca5bde7d7a03c3fbd2e817f5c0e8b9ea76a98b4830b021471f85 | def __init__(self, hass: HomeAssistant, async_create_flow: Callable, async_finish_flow: Callable) -> None:
'Initialize the flow manager.'
self.hass = hass
self._progress: Dict[(str, Any)] = {}
self._async_create_flow = async_create_flow
self._async_finish_flow = async_finish_flow | Initialize the flow manager. | homeassistant/data_entry_flow.py | __init__ | mockersf/home-assistant | 23 | python | def __init__(self, hass: HomeAssistant, async_create_flow: Callable, async_finish_flow: Callable) -> None:
self.hass = hass
self._progress: Dict[(str, Any)] = {}
self._async_create_flow = async_create_flow
self._async_finish_flow = async_finish_flow | def __init__(self, hass: HomeAssistant, async_create_flow: Callable, async_finish_flow: Callable) -> None:
self.hass = hass
self._progress: Dict[(str, Any)] = {}
self._async_create_flow = async_create_flow
self._async_finish_flow = async_finish_flow<|docstring|>Initialize the flow manager.<|endoftext|> |
9838b985c459dfaaf8b3cededca4aee3fe0375adceb88b2296ea23994591dbff | @callback
def async_progress(self) -> List[Dict]:
'Return the flows in progress.'
return [{'flow_id': flow.flow_id, 'handler': flow.handler, 'context': flow.context} for flow in self._progress.values()] | Return the flows in progress. | homeassistant/data_entry_flow.py | async_progress | mockersf/home-assistant | 23 | python | @callback
def async_progress(self) -> List[Dict]:
return [{'flow_id': flow.flow_id, 'handler': flow.handler, 'context': flow.context} for flow in self._progress.values()] | @callback
def async_progress(self) -> List[Dict]:
return [{'flow_id': flow.flow_id, 'handler': flow.handler, 'context': flow.context} for flow in self._progress.values()]<|docstring|>Return the flows in progress.<|endoftext|> |
d6a8b9ae828d7ed515ae538919630ca8ad84098b63cc8e8c0dc5ce78e328305f | async def async_init(self, handler: str, *, context: Optional[Dict]=None, data: Any=None) -> Any:
'Start a configuration flow.'
if (context is None):
context = {}
flow = (await self._async_create_flow(handler, context=context, data=data))
flow.hass = self.hass
flow.handler = handler
flow.flow_id = uuid.uuid4().hex
flow.context = context
self._progress[flow.flow_id] = flow
return (await self._async_handle_step(flow, flow.init_step, data)) | Start a configuration flow. | homeassistant/data_entry_flow.py | async_init | mockersf/home-assistant | 23 | python | async def async_init(self, handler: str, *, context: Optional[Dict]=None, data: Any=None) -> Any:
if (context is None):
context = {}
flow = (await self._async_create_flow(handler, context=context, data=data))
flow.hass = self.hass
flow.handler = handler
flow.flow_id = uuid.uuid4().hex
flow.context = context
self._progress[flow.flow_id] = flow
return (await self._async_handle_step(flow, flow.init_step, data)) | async def async_init(self, handler: str, *, context: Optional[Dict]=None, data: Any=None) -> Any:
if (context is None):
context = {}
flow = (await self._async_create_flow(handler, context=context, data=data))
flow.hass = self.hass
flow.handler = handler
flow.flow_id = uuid.uuid4().hex
flow.context = context
self._progress[flow.flow_id] = flow
return (await self._async_handle_step(flow, flow.init_step, data))<|docstring|>Start a configuration flow.<|endoftext|> |
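
As a toy illustration of the bookkeeping async_init performs above (stamp hass/handler/flow_id/context onto the flow, register it in _progress, then dispatch the first step), the standalone sketch below stubs out the injected factory and the step dispatch. Every name here is hypothetical scaffolding, not homeassistant API.

import asyncio
import uuid

class ToyFlow:
    init_step = 'user'

async def create_flow(handler, context, data):
    # stand-in for the injected _async_create_flow callable
    return ToyFlow()

async def handle_step(flow, step_id, data):
    # stand-in for FlowManager._async_handle_step
    return {'type': 'form', 'flow_id': flow.flow_id, 'step_id': step_id}

async def main():
    progress = {}
    flow = await create_flow('demo', context={}, data=None)
    flow.handler = 'demo'
    flow.flow_id = uuid.uuid4().hex
    flow.context = {}
    progress[flow.flow_id] = flow
    print(await handle_step(flow, flow.init_step, None))

asyncio.run(main())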