rem (string, lengths 1 to 322k) | add (string, lengths 0 to 2.05M) | context (string, lengths 4 to 228k) | meta (string, lengths 156 to 215) |
---|---|---|---|
twopercent_resourcetemplate = onepercentmanyevents_resource_fd.read() | twopercent_resourcetemplate = twopercent_resource_fd.read() | def main(): """ <Purpose> The main function that calls the process_nodes_and_change_state() function in the node_transition_lib passing in the process and error functions. <Arguments> None <Exceptions> None <Side Effects> None """ # Open and read the resource file that is necessary for twopercent vessels. # This will determine how the vessels will be split and how much resource # will be allocated to each vessel. twopercent_resource_fd = file(RESOURCES_TEMPLATE_FILE_PATH) twopercent_resourcetemplate = onepercentmanyevents_resource_fd.read() twopercent_resource_fd.close() # We are going to transition all the nodes that are in the canonical state # to the twopercent state. We are going to do this in three different # state. First we are going to transition all the canonical state nodes # to the movingto_twopercent state with a no-op function. The reason for # this is, so if anything goes wrong, we can revert back. # In the second step we are going to attempt to move all the nodes in the # movingto_twopercent state to the twopercent state. The way to do this, is # we are going to split the vessels by giving each vessel the resources # that are described in the resource template. # Next we are going to try to transition all the nodes in the # movingto_twopercent state to the canonical state. Any nodes that failed # to go to the twopercent are still stuck in the movingto_twopercent state, # and we want to move them back to the canonical state. # Variables that determine weather to mark a node inactive or not. mark_node_inactive = False mark_node_active = True state_function_arg_tuplelist = [ ("canonical", "movingto_twopercent", node_transition_lib.noop, node_transition_lib.noop, mark_node_inactive), ("movingto_twopercent", "twopercent", node_transition_lib.split_vessels, node_transition_lib.noop, mark_node_active, twopercent_resourcetemplate), ("movingto_twopercent", "canonical", node_transition_lib.combine_vessels, node_transition_lib.noop, mark_node_inactive)] sleeptime = 10 process_name = "canonical_to_twopercent" parallel_instances = 10 #call process_nodes_and_change_state() to start the node state transition node_transition_lib.process_nodes_and_change_state(state_function_arg_tuplelist, process_name, sleeptime, parallel_instances) | 2fd89eaf9e7ab7a5c79cc115ccb013a1b97ab34d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7995/2fd89eaf9e7ab7a5c79cc115ccb013a1b97ab34d/transition_canonical_to_twopercent.py |
for possibleport in configuration['ports']: try: servicelogger.log("[INFO]: Trying to wait") shimstack.waitforconn(unique_id, possibleport, nmconnectionmanager.connection_handler) except Exception, e: servicelogger.log("[ERROR]: when calling waitforconn for the connection_handler: " + str(e)) servicelogger.log_last_exception() else: acceptor_state['lock'].acquire() acceptor_state['started']= True acceptor_state['lock'].release() myname = unique_id + ":" + str(possibleport) servicelogger.log("[INFO]: Now listening as " + myname) break | if not private_key_string: raise TypeError("private_key_string must be provided if api_key is not") if not isinstance(private_key_string, basestring): raise TypeError("private_key_string must be a string") if not isinstance(xmlrpc_url, basestring): raise TypeError("xmlrpc_url must be a string") if not isinstance(allow_ssl_insecure, bool): raise TypeError("allow_ssl_insecure must be True or False") if not isinstance(ca_certs_file, basestring): raise TypeError("ca_certs_file must be a string") if allow_ssl_insecure: self.proxy = xmlrpclib.Server(xmlrpc_url) else: ssl_transport = _get_ssl_transport(ca_certs_file) self.proxy = xmlrpclib.Server(xmlrpc_url, transport=ssl_transport) if not api_key: api_key = self._get_api_key(username, private_key_string) self.auth = {'username':username, 'api_key':api_key} def _get_api_key(self, username, private_key_string): try: import repyhelper import repyportability repyhelper.translate_and_import("rsa.repy") except ImportError, e: raise SeattleGENIError("Unable to get API key from SeattleGENI " + "because a required python or repy module " + "cannot be found:" + str(e)) private_key_dict = rsa_string_to_privatekey(private_key_string) encrypted_data = self.proxy.get_encrypted_api_key(username) decrypted_data = rsa_decrypt(encrypted_data, private_key_dict) split_data = decrypted_data.split("!") if len(split_data) != 2 or len(split_data[0]) != 20: raise AuthenticationError("The provided private key does not appear " + "to correspond to this account's public key: " + "encrypted API key could not be decrypted.") api_key = split_data[1] return api_key def _do_call(self, function, *args): try: return function(self.auth, *args) except socket.error, err: raise CommunicationError("XMLRPC failed: " + str(err)) except xmlrpclib.Fault, fault: if fault.faultCode == FAULTCODE_AUTHERROR: raise AuthenticationError elif fault.faultCode == FAULTCODE_INVALIDREQUEST: raise InvalidRequestError(fault.faultString) elif fault.faultCode == FAULTCODE_NOTENOUGHCREDITS: raise NotEnoughCreditsError(fault.faultString) elif fault.faultCode == FAULTCODE_UNABLETOACQUIRE: raise UnableToAcquireResourcesError(fault.faultString) | def start_accepter(): shimstack = ShimStackInterface('(RSAShim)(NatDeciderShim)') unique_id = rsa_publickey_to_string(configuration['publickey']) unique_id = sha_hexhash(unique_id) + str(configuration['service_vessel']) # do this until we get the accepter started... while True: if is_accepter_started(): # we're done, return the name! return myname else: for possibleport in configuration['ports']: try: servicelogger.log("[INFO]: Trying to wait") shimstack.waitforconn(unique_id, possibleport, nmconnectionmanager.connection_handler) except Exception, e: servicelogger.log("[ERROR]: when calling waitforconn for the connection_handler: " + str(e)) servicelogger.log_last_exception() else: # the waitforconn was completed so the acceptor is started acceptor_state['lock'].acquire() acceptor_state['started']= True acceptor_state['lock'].release() # assign the nodemanager name myname = unique_id + ":" + str(possibleport) servicelogger.log("[INFO]: Now listening as " + myname) break else: servicelogger.log("[ERROR]: cannot find a port for waitforconn.") # check infrequently time.sleep(configuration['pollfrequency']) | 99fb6e1ca74de6038aa668a9cd8075a52e5e9538 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7995/99fb6e1ca74de6038aa668a9cd8075a52e5e9538/addme.py |
servicelogger.log("[ERROR]: cannot find a port for waitforconn.") time.sleep(configuration['pollfrequency']) def is_worker_thread_started(): for thread in threading.enumerate(): if 'WorkerThread' in str(thread): return True else: return False def start_worker_thread(sleeptime): if not is_worker_thread_started(): workerthread = nmconnectionmanager.WorkerThread(sleeptime) workerthread.setDaemon(True) workerthread.start() def is_advert_thread_started(): for thread in threading.enumerate(): if 'Advertisement Thread' in str(thread): return True else: return False def start_advert_thread(vesseldict, myname, nodekey): if should_start_waitable_thread('advert','Advertisement Thread'): advertthread = nmadvertise.advertthread(vesseldict, nodekey) nmadvertise.myname = myname advertthread.setDaemon(True) advertthread.start() started_waitable_thread('advert') def is_status_thread_started(): for thread in threading.enumerate(): if 'Status Monitoring Thread' in str(thread): return True else: return False def start_status_thread(vesseldict,sleeptime): if should_start_waitable_thread('status','Status Monitoring Thread'): statusthread = nmstatusmonitor.statusthread(vesseldict, sleeptime, nmAPI) statusthread.setDaemon(True) statusthread.start() started_waitable_thread('status') def main(): global configuration if not FOREGROUND: daemon.daemonize() gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return servicelogger.log("[INFO]:Loading config") configuration = persist.restore_object("nodeman.cfg") initialize_ip_interface_restrictions(configuration) if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) myip = None while True: | raise InternalError(fault.faultString) def _do_pwauth_call(self, function, password, *args): """For use by calls that require a password rather than an api key.""" pwauth = {'username':self.auth['username'], 'password':password} | def start_accepter(): shimstack = ShimStackInterface('(RSAShim)(NatDeciderShim)') unique_id = rsa_publickey_to_string(configuration['publickey']) unique_id = sha_hexhash(unique_id) + str(configuration['service_vessel']) # do this until we get the accepter started... while True: if is_accepter_started(): # we're done, return the name! 
return myname else: for possibleport in configuration['ports']: try: servicelogger.log("[INFO]: Trying to wait") shimstack.waitforconn(unique_id, possibleport, nmconnectionmanager.connection_handler) except Exception, e: servicelogger.log("[ERROR]: when calling waitforconn for the connection_handler: " + str(e)) servicelogger.log_last_exception() else: # the waitforconn was completed so the acceptor is started acceptor_state['lock'].acquire() acceptor_state['started']= True acceptor_state['lock'].release() # assign the nodemanager name myname = unique_id + ":" + str(possibleport) servicelogger.log("[INFO]: Now listening as " + myname) break else: servicelogger.log("[ERROR]: cannot find a port for waitforconn.") # check infrequently time.sleep(configuration['pollfrequency']) | 99fb6e1ca74de6038aa668a9cd8075a52e5e9538 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7995/99fb6e1ca74de6038aa668a9cd8075a52e5e9538/addme.py |
myip = emulcomm.getmyip() except Exception, e: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": pass | return function(pwauth, *args) except socket.error, err: raise CommunicationError("XMLRPC failed: " + str(err)) except xmlrpclib.Fault, fault: if fault.faultCode == FAULTCODE_AUTHERROR: raise AuthenticationError elif fault.faultCode == FAULTCODE_INVALIDREQUEST: raise InvalidRequestError(fault.faultString) elif fault.faultCode == FAULTCODE_NOTENOUGHCREDITS: raise NotEnoughCreditsError(fault.faultString) elif fault.faultCode == FAULTCODE_UNABLETOACQUIRE: raise UnableToAcquireResourcesError(fault.faultString) | def main(): global configuration if not FOREGROUND: # Background ourselves. daemon.daemonize() # ensure that only one instance is running at a time... gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: # I got the lock. All is well... pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return # I'll grab the necessary information first... servicelogger.log("[INFO]:Loading config") # BUG: Do this better? Is this the right way to engineer this? configuration = persist.restore_object("nodeman.cfg") # Armon: initialize the network restrictions initialize_ip_interface_restrictions(configuration) # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new # seattle crontab entry has been installed in the crontab. # Do this here because the "nodeman.cfg" needs to have been read # into configuration via the persist module. if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() # If updating the seattle crontab entry succeeded, then update the # 'crontab_updated_for_2009_installer' so the nodemanager no longer # tries to update the crontab entry when it starts up. if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) # get the external IP address... # BUG: What if my external IP changes? (A problem throughout) myip = None while True: try: # Try to find our external IP. myip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) # Start accepter... myname = start_accepter() #send our advertised name to the log servicelogger.log('myname = '+str(myname)) # Start worker thread... start_worker_thread(configuration['pollfrequency']) # Start advert thread... start_advert_thread(vesseldict, myname, configuration['publickey']) # Start status thread... start_status_thread(vesseldict,configuration['pollfrequency']) # we should be all set up now. servicelogger.log("[INFO]:Started") # I will count my iterations through the loop so that I can log a message # periodically. This makes it clear I am alive. times_through_the_loop = 0 # BUG: Need to exit all when we're being upgraded while True: # E.K Previous there was a check to ensure that the acceptor # thread was started. There is no way to actually check this # and this code was never executed, so i removed it completely if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) time.sleep(configuration['pollfrequency']) # if I've been through the loop enough times, log this... times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...") | 99fb6e1ca74de6038aa668a9cd8075a52e5e9538 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7995/99fb6e1ca74de6038aa668a9cd8075a52e5e9538/addme.py |
raise else: break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) myname = start_accepter() servicelogger.log('myname = '+str(myname)) start_worker_thread(configuration['pollfrequency']) start_advert_thread(vesseldict, myname, configuration['publickey']) start_status_thread(vesseldict,configuration['pollfrequency']) servicelogger.log("[INFO]:Started") times_through_the_loop = 0 while True: if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) time.sleep(configuration['pollfrequency']) times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...") if __name__ == '__main__': for arg in sys.argv[1:]: if arg == '-nat': AUTO_USE_NAT = True if arg == '--foreground': FOREGROUND = True servicelogger.init('nodemanager') | raise InternalError(fault.faultString) def acquire_lan_resources(self, count): """ <Purpose> Acquire LAN vessels. <Arguments> count The number of vessels to acquire. <Exceptions> The common exceptions described in the module comments, as well as: SeattleGENINotEnoughCredits If the account does not have enough available vessel credits to fulfill the request. <Side Effects> If successful, 'count' LAN vessels have been acquired for the account. <Returns> A list of vessel handles of the acquired vessels. """ return self.acquire_resources('lan', count) def acquire_wan_resources(self, count): """ <Purpose> Acquire WAN vessels. <Arguments> count The number of vessels to acquire. <Exceptions> The common exceptions described in the module comments, as well as: SeattleGENINotEnoughCredits If the account does not have enough available vessel credits to fulfill the request. <Side Effects> If successful, 'count' WAN vessels have been acquired for the account. <Returns> A list of vessel handles of the acquired vessels. """ return self.acquire_resources('wan', count) def acquire_nat_resources(self, count): """ <Purpose> Acquire NAT vessels. <Arguments> count The number of vessels to acquire. <Exceptions> The common exceptions described in the module comments, as well as: SeattleGENINotEnoughCredits If the account does not have enough available vessel credits to fulfill the request. <Side Effects> If successful, 'count' NAT vessels have been acquired for the account. <Returns> A list of vessel handles of the acquired vessels. """ return self.acquire_resources('nat', count) def acquire_random_resources(self, count): """ <Purpose> Acquire vessels (they can be LAN, WAN, NAT, or any combination of these). <Arguments> count The number of vessels to acquire. <Exceptions> The common exceptions described in the module comments, as well as: SeattleGENINotEnoughCredits If the account does not have enough available vessel credits to fulfill the request. <Side Effects> If successful, 'count' vessels have been acquired for the account. <Returns> A list of vessel handles of the acquired vessels. """ return self.acquire_resources('random', count) def acquire_resources(self, res_type, count): """ <Purpose> Acquire vessels. <Arguments> res_type A string describing the type of vessels to acquire. count The number of vessels to acquire. <Exceptions> The common exceptions described in the module comments, as well as: SeattleGENINotEnoughCredits If the account does not have enough available vessel credits to fulfill the request. <Side Effects> If successful, 'count' vessels have been acquired for the account. <Returns> A list of vessel handles of the acquired vessels. """ if not isinstance(res_type, basestring): raise TypeError("res_type must be a string") if type(count) not in [int, long]: raise TypeError("count must be an integer") rspec = {'rspec_type':res_type, 'number_of_nodes':count} return self._do_call(self.proxy.acquire_resources, rspec) def acquire_specific_vessels(self, handlelist): """ <Purpose> Attempt to acquire specific vessels. <Arguments> handlelist A list of vessel handles. <Exceptions> The common exceptions described in the module comments, as well as: SeattleGENINotEnoughCredits If the account does not have enough available vessel credits to fulfill the request. <Side Effects> If successful, zero or more vessels from handlelist have been acquired. <Returns> A list of vessel handles of the acquired vessels. """ _validate_handle_list(handlelist) return self._do_call(self.proxy.acquire_specific_vessels, handlelist) def release_resources(self, handlelist): """ <Purpose> Release vessels. <Arguments> handlelist A list of handles as returned by acquire_vessels() or found in the 'handle' key of the dictionaries returned by get_resource_info(). <Exceptions> The common exceptions described in the module comments. <Side Effects> If successful, the vessels in handlelist have been released. If not successful, it is possible that a partial set of the vessels was released. <Returns> None """ _validate_handle_list(handlelist) return self._do_call(self.proxy.release_resources, handlelist) def renew_resources(self, handlelist): """ <Purpose> Renew vessels. <Arguments> handlelist A list of handles as returned by acquire_vessels() or found in the 'handle' key of the dictionaries returned by get_resource_info(). <Exceptions> The common exceptions described in the module comments, as well as: SeattleGENINotEnoughCredits If the account is currently over its vessel credit limit, then vessels cannot be renewed until the account is no longer over its credit limit. <Side Effects> If successful, the vessels in handlelist have been renewed. If not successful, it is possible that a partial set of the vessels was renewed. <Returns> None """ _validate_handle_list(handlelist) return self._do_call(self.proxy.renew_resources, handlelist) def get_resource_info(self): """ <Purpose> Obtain information about acquired vessels. <Arguments> None <Exceptions> The common exceptions described in the module comments, as well as: <Side Effects> None <Returns> A list of dictionaries, where each dictionary describes a vessel that is currently acquired by the account. """ return self._do_call(self.proxy.get_resource_info) def get_account_info(self): """ <Purpose> Obtain information about the account. <Arguments> None <Exceptions> The common exceptions described in the module comments, as well as: <Side Effects> None <Returns> A dictionary with information about the account. """ return self._do_call(self.proxy.get_account_info) def get_public_key(self): """ <Purpose> Obtain the public key of the account. <Arguments> None <Exceptions> The common exceptions described in the module comments, as well as: None <Side Effects> None <Returns> A string containing the public key of the account. """ return self._do_call(self.proxy.get_public_key) def set_public_key(self, password, pubkeystring): """ <Purpose> Set the public key of the account. <Arguments> password The account password. This is required because changing the public key of the account cannot be done with just the api key. pubkeystring A string representing the new public key to be set for the account. <Exceptions> The common exceptions described in the module comments, as well as: InvalidRequestError If the pubkey is invalid. <Side Effects> The public key of the account is changed and will be updated on all vessels the account has acquired. <Returns> None """ self._do_pwauth_call(self.proxy.set_public_key, password, pubkeystring) def regenerate_api_key(self, password): """ <Purpose> Generate a new API key for the account.. <Arguments> password The account password. This is required because changing the api key of the account cannot be done with just the current api key. <Exceptions> The common exceptions described in the module comments, as well as: None <Side Effects> The account's api key has been changed. <Returns> The new api key for the account. """ api_key = self._do_pwauth_call(self.proxy.regenerate_api_key, password) self.auth['api_key'] = api_key return api_key def _validate_handle_list(handlelist): """ Raise a TypeError or ValueError if handlelist is not a non-empty list of string. """ if not isinstance(handlelist, list): raise TypeError("Invalid data type for handle list: " + str(type(handlelist))) for handle in handlelist: if not isinstance(handle, basestring): raise TypeError("Invalid data type for a handle in the handle list: " + str(type(handle))) if not handlelist: raise ValueError("Given handlelist is empty.") def _get_ssl_transport(ca_certs_file): """ Returns an object usable as the transport for an xmlrpclib proxy. This will be an M2Crypto.m2xmlrpclib.SSL_Transport that has been configured with a context that has the ca_certs_file loaded, will not allow SSLv2, and will reject certificate names that don't match the hostname. """ | def main(): global configuration if not FOREGROUND: # Background ourselves. daemon.daemonize() # ensure that only one instance is running at a time... gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: # I got the lock. All is well... pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return # I'll grab the necessary information first... servicelogger.log("[INFO]:Loading config") # BUG: Do this better? Is this the right way to engineer this? configuration = persist.restore_object("nodeman.cfg") # Armon: initialize the network restrictions initialize_ip_interface_restrictions(configuration) # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new # seattle crontab entry has been installed in the crontab. # Do this here because the "nodeman.cfg" needs to have been read # into configuration via the persist module. if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() # If updating the seattle crontab entry succeeded, then update the # 'crontab_updated_for_2009_installer' so the nodemanager no longer # tries to update the crontab entry when it starts up. if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) # get the external IP address... # BUG: What if my external IP changes? (A problem throughout) myip = None while True: try: # Try to find our external IP. myip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) # Start accepter... myname = start_accepter() #send our advertised name to the log servicelogger.log('myname = '+str(myname)) # Start worker thread... start_worker_thread(configuration['pollfrequency']) # Start advert thread... start_advert_thread(vesseldict, myname, configuration['publickey']) # Start status thread... start_status_thread(vesseldict,configuration['pollfrequency']) # we should be all set up now. servicelogger.log("[INFO]:Started") # I will count my iterations through the loop so that I can log a message # periodically. This makes it clear I am alive. times_through_the_loop = 0 # BUG: Need to exit all when we're being upgraded while True: # E.K Previous there was a check to ensure that the acceptor # thread was started. There is no way to actually check this # and this code was never executed, so i removed it completely if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) time.sleep(configuration['pollfrequency']) # if I've been through the loop enough times, log this... times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...") | 99fb6e1ca74de6038aa668a9cd8075a52e5e9538 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7995/99fb6e1ca74de6038aa668a9cd8075a52e5e9538/addme.py |
main() except Exception,e: servicelogger.log_last_exception() harshexit.harshexit(15) | import M2Crypto except ImportError, err: raise ImportError("In order to use the SeattleGENI XMLRPC client with " + "allow_ssl_insecure=False, you need M2Crypto " + "installed. " + str(err)) class M2CryptoSSLTransport(M2Crypto.m2xmlrpclib.SSL_Transport): def request(self, host, handler, request_body, verbose=0): if host.find(":") == -1: host = host + ":443" return M2Crypto.m2xmlrpclib.SSL_Transport.request(self, host, handler, request_body, verbose) ctx = M2Crypto.SSL.Context("sslv3") ctx.set_verify(M2Crypto.SSL.verify_peer | M2Crypto.SSL.verify_fail_if_no_peer_cert, depth=9) if ctx.load_verify_locations(ca_certs_file) != 1: raise SeattleGENIError("No CA certs found in file: " + ca_certs_file) return M2CryptoSSLTransport(ctx) class SeattleGENIError(Exception): """Base class for exceptions raised by the SeattleGENIClient.""" class CommunicationError(SeattleGENIError): """ Indicates that XMLRPC communication failed. """ class InternalError(SeattleGENIError): """ Indicates an unexpected error occurred, probably either a bug in this client or a bug in SeattleGENI. """ class AuthenticationError(SeattleGENIError): """Indicates an authentication failure (invalid username and/or API key).""" def __init__(self, msg=None): if msg is None: msg = "Authentication failed. Invalid username and/or API key." SeattleGENIError.__init__(self, msg) class InvalidRequestError(SeattleGENIError): """Indicates that the request is invalid.""" class NotEnoughCreditsError(SeattleGENIError): """ Indicates that the requested operation requires more vessel credits to be available then the account currently has. """ class UnableToAcquireResourcesError(SeattleGENIError): """ Indicates that the requested operation failed because SeattleGENI was unable to acquire the requested resources. """ if __name__ == "__main__": main() | def main(): global configuration if not FOREGROUND: # Background ourselves. daemon.daemonize() # ensure that only one instance is running at a time... gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: # I got the lock. All is well... pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return # I'll grab the necessary information first... servicelogger.log("[INFO]:Loading config") # BUG: Do this better? Is this the right way to engineer this? configuration = persist.restore_object("nodeman.cfg") # Armon: initialize the network restrictions initialize_ip_interface_restrictions(configuration) # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new # seattle crontab entry has been installed in the crontab. # Do this here because the "nodeman.cfg" needs to have been read # into configuration via the persist module. if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() # If updating the seattle crontab entry succeeded, then update the # 'crontab_updated_for_2009_installer' so the nodemanager no longer # tries to update the crontab entry when it starts up. if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) # get the external IP address... # BUG: What if my external IP changes? (A problem throughout) myip = None while True: try: # Try to find our external IP. myip = emulcomm.getmyip() except Exception, e: # If we aren't connected to the internet, emulcomm.getmyip() raises this: if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.": # So we try again. pass else: # It wasn't emulcomm.getmyip()'s exception. re-raise. raise else: # We succeeded in getting our external IP. Leave the loop. break time.sleep(0.1) vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version) # Start accepter... myname = start_accepter() #send our advertised name to the log servicelogger.log('myname = '+str(myname)) # Start worker thread... start_worker_thread(configuration['pollfrequency']) # Start advert thread... start_advert_thread(vesseldict, myname, configuration['publickey']) # Start status thread... start_status_thread(vesseldict,configuration['pollfrequency']) # we should be all set up now. servicelogger.log("[INFO]:Started") # I will count my iterations through the loop so that I can log a message # periodically. This makes it clear I am alive. times_through_the_loop = 0 # BUG: Need to exit all when we're being upgraded while True: # E.K Previous there was a check to ensure that the acceptor # thread was started. There is no way to actually check this # and this code was never executed, so i removed it completely if not is_worker_thread_started(): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...") start_worker_thread(configuration['pollfrequency']) if should_start_waitable_thread('advert','Advertisement Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...") start_advert_thread(vesseldict, myname, configuration['publickey']) if should_start_waitable_thread('status','Status Monitoring Thread'): servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...") start_status_thread(vesseldict,configuration['pollfrequency']) if not runonce.stillhaveprocesslock("seattlenodemanager"): servicelogger.log("[ERROR]:The node manager lost the process lock...") harshexit.harshexit(55) time.sleep(configuration['pollfrequency']) # if I've been through the loop enough times, log this... times_through_the_loop = times_through_the_loop + 1 if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0: servicelogger.log("[INFO]: node manager is alive...") | 99fb6e1ca74de6038aa668a9cd8075a52e5e9538 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7995/99fb6e1ca74de6038aa668a9cd8075a52e5e9538/addme.py |
times = windows_api.process_times(pid) | times = process_times(pid) | def get_process_cpu_time(pid): """ <Purpose> See process_times <Arguments> See process_times <Exceptions> See process_times <Returns> The amount of CPU time used by the kernel and user in seconds. """ # Get the times times = windows_api.process_times(pid) # Add kernel and user time together... It's in units of 100ns so divide # by 10,000,000 total_time = (times['KernelTime'] + times['UserTime'] ) / 10000000.0 return total_time | 98ecf300d8b4d13608163ca21c082bd5f56e5106 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7995/98ecf300d8b4d13608163ca21c082bd5f56e5106/windows_api.py |
copy_to_target("production_nat_new/src/nmpatch/nmmain.py", target_dir) copy_to_target("production_nat_new/src/nmpatch/nmclient.repy", target_dir) copy_to_target("production_nat_new/src/nmpatch/sockettimeout.repy", target_dir) copy_to_target("production_nat_new/src/nmpatch/ShimStackInterface.repy", target_dir) | def main(): repytest = False RANDOMPORTS = False target_dir = None for arg in sys.argv[1:]: # -t means we will copy repy tests if arg == '-t': repytest = True # The user wants us to fill in the port numbers randomly. elif arg == '-randomports': RANDOMPORTS = True # Not a flag? Assume it's the target directory else: target_dir = arg # We need a target dir. If one isn't found in argv, quit. if target_dir is None: help_exit("Please pass the target directory as a parameter.") #store root directory current_dir = os.getcwd() # Make sure they gave us a valid directory if not( os.path.isdir(target_dir) ): help_exit("given foldername is not a directory") #set working directory to the test folder os.chdir(target_dir) files_to_remove = glob.glob("*") #clean the test folder for f in files_to_remove: if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f) #go back to root project directory os.chdir(current_dir) #now we copy the necessary files to the test folder copy_to_target("repy/*", target_dir) copy_to_target("nodemanager/*", target_dir) copy_to_target("portability/*", target_dir) copy_to_target("seattlelib/*", target_dir) copy_to_target("seash/*", target_dir) copy_to_target("softwareupdater/*", target_dir) copy_to_target("autograder/nm_remote_api.mix", target_dir) copy_to_target("keydaemon/*", target_dir) # The license must be included in anything we distribute. copy_to_target("LICENSE.TXT", target_dir) # Uncomment this when its ready to be in production. # Copy over the files needed for using shim. # copy_to_target("production_nat_new/src/nmpatch/nmmain.py", target_dir) # copy_to_target("production_nat_new/src/nmpatch/nmclient.repy", target_dir) # copy_to_target("production_nat_new/src/nmpatch/sockettimeout.repy", target_dir) # copy_to_target("production_nat_new/src/nmpatch/ShimStackInterface.repy", target_dir) # Only copy the tests if they were requested. if repytest: # The test framework itself. copy_to_target("utf/*.py", target_dir) # The various tests. copy_to_target("repy/tests/*", target_dir) copy_to_target("nodemanager/tests/*", target_dir) copy_to_target("portability/tests/*", target_dir) copy_to_target("seash/tests/*", target_dir) copy_to_target("oddball/tests/*", target_dir) copy_to_target("seattlelib/tests/*", target_dir) copy_to_target("keydaemon/tests/*", target_dir) copy_to_target("utf/tests/*", target_dir) # jsamuel: This file, dist/update_crontab_entry.py, is directly included by # make_base_installers and appears to be a candidate for removal someday. # I assume zackrb needed this for installer testing. copy_to_target("dist/update_crontab_entry.py", target_dir) #set working directory to the test folder os.chdir(target_dir) #call the process_mix function to process all mix files in the target directory process_mix("repypp.py") # set up dynamic port information if RANDOMPORTS: portstouseasints = random.sample(range(52000, 53000), 3) portstouseasstr = [] for portint in portstouseasints: portstouseasstr.append(str(portint)) print "Randomly chosen ports: ",portstouseasstr testportfiller.replace_ports(portstouseasstr, portstouseasstr) else: # if this isn't specified, just use the default ports... 
testportfiller.replace_ports(['12345','12346','12347'], ['12345','12346','12347']) #go back to root project directory os.chdir(current_dir) | 3aba7caf204d6bc10d1bd5220f4752824294b94e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7995/3aba7caf204d6bc10d1bd5220f4752824294b94e/preparetest.py |
try: advertise_announce(unique_id, myip, DNS_CACHE_TTL) servicelogger.log("[INFO]: Advertised " + str(unique_id) + " which maps to " + myip) except Exception, error: if 'announce error' in str(error): pass else: raise Exception(error) | advertise_success = False while not advertise_success: try: advertise_announce(unique_id, myip, DNS_CACHE_TTL) servicelogger.log("[INFO]: Advertised " + str(unique_id) + " which maps to " + myip) advertise_success = True except Exception, error: if 'announce error' in str(error): advertise_success = True else: advertise_success = False | def advertise_to_DNS(unique_id): """ Advertise unique_id to the zenodotus DNS server. We strip away whatever that follows the NAME_SERVER part of the unique_id. For instance, if our unique_id is abc.NAME_SERVER:1234@xyz, then we only advertise abc.NAME_SERVER. """ # IP that maps to the unique_id myip = emulcomm.getmyip() # Extract the part of unique_id up to the name server, # i.e. xyz.zenodotus.washington.edu, and discard whatever that follows name_server_pos = unique_id.find(NAME_SERVER) if name_server_pos > -1: unique_id = unique_id[0 : name_server_pos + len(NAME_SERVER)] else: raise Exception("Invalid unique_id format: '" + str(unique_id) + "'") try: advertise_announce(unique_id, myip, DNS_CACHE_TTL) servicelogger.log("[INFO]: Advertised " + str(unique_id) + " which maps to " + myip) except Exception, error: if 'announce error' in str(error): # We can confidently drop the exception here. The advertisement service # can sometimes be flaky, yet it can guarantee advertisement of our # key-value pair on at least one of the three components. Thus, we are # printing the error message as a warning here. pass else: raise Exception(error) | 717ad4f65641a2bd4f9fb31cf3f7452634fae3ba /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7995/717ad4f65641a2bd4f9fb31cf3f7452634fae3ba/nmmain.py |
copy_to_target("production_nat_new/src/nmpatch/nmmain.py") copy_to_target("production_nat_new/src/nmpatch/nmclient.repy") copy_to_target("production_nat_new/src/nmpatch/sockettimeout.repy") copy_to_target("production_nat_new/src/nmpatch/ShimStackInterface.repy") | copy_to_target("production_nat_new/src/nmpatch/nmmain.py", target_dir) copy_to_target("production_nat_new/src/nmpatch/nmclient.repy", target_dir) copy_to_target("production_nat_new/src/nmpatch/sockettimeout.repy", target_dir) copy_to_target("production_nat_new/src/ShimStackInterface.repy", target_dir) | def main(): repytest = False RANDOMPORTS = False target_dir = None for arg in sys.argv[1:]: # -t means we will copy repy tests if arg == '-t': repytest = True # The user wants us to fill in the port numbers randomly. elif arg == '-randomports': RANDOMPORTS = True # Not a flag? Assume it's the target directory else: target_dir = arg # We need a target dir. If one isn't found in argv, quit. if target_dir is None: help_exit("Please pass the target directory as a parameter.") #store root directory current_dir = os.getcwd() # Make sure they gave us a valid directory if not( os.path.isdir(target_dir) ): help_exit("given foldername is not a directory") #set working directory to the test folder os.chdir(target_dir) files_to_remove = glob.glob("*") #clean the test folder for f in files_to_remove: if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f) #go back to root project directory os.chdir(current_dir) #now we copy the necessary files to the test folder copy_to_target("repy/*", target_dir) copy_to_target("nodemanager/*", target_dir) copy_to_target("portability/*", target_dir) copy_to_target("seattlelib/*", target_dir) copy_to_target("seash/*", target_dir) copy_to_target("softwareupdater/*", target_dir) copy_to_target("autograder/nm_remote_api.mix", target_dir) copy_to_target("keydaemon/*", target_dir) # The license must be included in anything we distribute. copy_to_target("LICENSE.TXT", target_dir) # Copy over the files needed for using shim. copy_to_target("production_nat_new/src/nmpatch/nmmain.py") copy_to_target("production_nat_new/src/nmpatch/nmclient.repy") copy_to_target("production_nat_new/src/nmpatch/sockettimeout.repy") copy_to_target("production_nat_new/src/nmpatch/ShimStackInterface.repy") # Only copy the tests if they were requested. if repytest: # The test framework itself. copy_to_target("utf/*.py", target_dir) # The various tests. copy_to_target("repy/tests/*", target_dir) copy_to_target("nodemanager/tests/*", target_dir) copy_to_target("portability/tests/*", target_dir) copy_to_target("seash/tests/*", target_dir) copy_to_target("oddball/tests/*", target_dir) copy_to_target("seattlelib/tests/*", target_dir) copy_to_target("keydaemon/tests/*", target_dir) copy_to_target("utf/tests/*", target_dir) # jsamuel: This file, dist/update_crontab_entry.py, is directly included by # make_base_installers and appears to be a candidate for removal someday. # I assume zackrb needed this for installer testing. 
copy_to_target("dist/update_crontab_entry.py", target_dir) #set working directory to the test folder os.chdir(target_dir) #call the process_mix function to process all mix files in the target directory process_mix("repypp.py") # set up dynamic port information if RANDOMPORTS: portstouseasints = random.sample(range(52000, 53000), 3) portstouseasstr = [] for portint in portstouseasints: portstouseasstr.append(str(portint)) print "Randomly chosen ports: ",portstouseasstr testportfiller.replace_ports(portstouseasstr, portstouseasstr) else: # if this isn't specified, just use the default ports... testportfiller.replace_ports(['12345','12346','12347'], ['12345','12346','12347']) #go back to root project directory os.chdir(current_dir) | 5eb8f5e933ecbefef0bb8d0973860c7ae1bf7791 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7995/5eb8f5e933ecbefef0bb8d0973860c7ae1bf7791/preparetest.py |
def drvterm(t,p,q,l,m): dv=t.betx**(p/2.)*t.bety**(q/2.) dv*=exp(+2j*pi*((p-2*l)*t.mux+(q-2*m)*t.muy)) | def drvterm(t,p=0,q=0,l=0,m=0): dv=t.betx**(abs(p)/2.)*t.bety**(abs(q)/2.) dv*=_n.exp(+2j*pi*((p-2*l)*t.mux+(q-2*m)*t.muy)) | def drvterm(t,p,q,l,m): dv=t.betx**(p/2.)*t.bety**(q/2.) dv*=exp(+2j*pi*((p-2*l)*t.mux+(q-2*m)*t.muy)) return dv | 3d13b9271bfc1293d64594ec1e5c0d576c680f55 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5983/3d13b9271bfc1293d64594ec1e5c0d576c680f55/optics.py |
s=self.xaxis | s=self.ont.s | def _lattice(self,names,color,lbl): | a4f2abb475d4e687c401a7ca9d020f002dc0ec02 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5983/a4f2abb475d4e687c401a7ca9d020f002dc0ec02/optics.py |
prenoms=[unicode(x.strip(),"utf-8") for x in open("prenoms.txt")] | prenoms=[unicode(x.strip(),"utf-8").capitalize() for x in open("prenoms.txt")] | def __init__(self, parent=None): QtGui.QWidget.__init__(self, parent) self.ui = Ui_MainWindow() self.ui.setupUi(self) | 19c8bdeb0c3f63a86180290e24195c2e59889930 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13828/19c8bdeb0c3f63a86180290e24195c2e59889930/vote.py |
p1,p2=self.combis.pop() while frozenset((p1,p2)) in self.ballots.ballots.keys(): try: p1,p2=self.combis.pop() except IndexError: | p1,p2=None,None while p1 is None or frozenset((p1,p2)) in self.ballots.ballots.keys(): if not self.combis: | def update(self): p1,p2=self.combis.pop() while frozenset((p1,p2)) in self.ballots.ballots.keys(): try: p1,p2=self.combis.pop() except IndexError: print "Thanks, you are done!" QtGui.QApplication.instance().quit() sys.exit(0) self.ui.prenom1.setText(p1) self.ui.prenom2.setText(p2) | 19c8bdeb0c3f63a86180290e24195c2e59889930 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13828/19c8bdeb0c3f63a86180290e24195c2e59889930/vote.py |
QtGui.QApplication.instance().quit() | def update(self): p1,p2=self.combis.pop() while frozenset((p1,p2)) in self.ballots.ballots.keys(): try: p1,p2=self.combis.pop() except IndexError: print "Thanks, you are done!" QtGui.QApplication.instance().quit() sys.exit(0) self.ui.prenom1.setText(p1) self.ui.prenom2.setText(p2) | 19c8bdeb0c3f63a86180290e24195c2e59889930 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13828/19c8bdeb0c3f63a86180290e24195c2e59889930/vote.py |
old_sep,old_count=self.ballots[self.get_couple(ballot)] | d1,d2,old_sep,old_count=self.ballots[self.get_couple(ballot)] | def add(self,ballot): winner,sep,other,count=ballot winner=winner.capitalize() other=other.capitalize() if not self.is_in(ballot): self.ballots[self.get_couple(ballot)]=(winner,sep,other,count) else: old_sep,old_count=self.ballots[self.get_couple(ballot)] assert(old_sep==sep) self.ballots[self.get_couple(ballot)]=(winner,sep,other,old_count+count) | 794ff6b34413bf5aed81fecb6828ed21281f9550 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13828/794ff6b34413bf5aed81fecb6828ed21281f9550/vote.py |
return repr(count)+":"+winner+sep+other | return unicode(repr(count))+u":"+winner+sep+other | def ballot_repr(self,ballot): winner,sep,other,count=ballot return repr(count)+":"+winner+sep+other | 794ff6b34413bf5aed81fecb6828ed21281f9550 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13828/794ff6b34413bf5aed81fecb6828ed21281f9550/vote.py |
f.write((self.ballot_repr(ballot)+"\n").encode("utf-8")) | f.write((self.ballot_repr(ballot)+u"\n").encode("utf-8")) | def save(self): with open(self.filename,"w") as f: for ballot in self.ballots.values(): f.write((self.ballot_repr(ballot)+"\n").encode("utf-8")) | 794ff6b34413bf5aed81fecb6828ed21281f9550 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13828/794ff6b34413bf5aed81fecb6828ed21281f9550/vote.py |
b=(unicode(self.ui.prenom1.text()),"=",unicode(self.ui.prenom2.text()),1) | b=(unicode(self.ui.prenom1.text()),u"=",unicode(self.ui.prenom2.text()),1) | def count_ballot_and_update(self,win): if win == 0: b=(unicode(self.ui.prenom1.text()),"=",unicode(self.ui.prenom2.text()),1) elif win==1: b=(unicode(self.ui.prenom1.text()),">",unicode(self.ui.prenom2.text()),1) elif win==2: b=(unicode(self.ui.prenom2.text()),">",unicode(self.ui.prenom1.text()),1) self.ballots.add(b) self.ballots.save() self.update() | 794ff6b34413bf5aed81fecb6828ed21281f9550 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13828/794ff6b34413bf5aed81fecb6828ed21281f9550/vote.py |
b=(unicode(self.ui.prenom1.text()),">",unicode(self.ui.prenom2.text()),1) | b=(unicode(self.ui.prenom1.text()),u">",unicode(self.ui.prenom2.text()),1) | def count_ballot_and_update(self,win): if win == 0: b=(unicode(self.ui.prenom1.text()),"=",unicode(self.ui.prenom2.text()),1) elif win==1: b=(unicode(self.ui.prenom1.text()),">",unicode(self.ui.prenom2.text()),1) elif win==2: b=(unicode(self.ui.prenom2.text()),">",unicode(self.ui.prenom1.text()),1) self.ballots.add(b) self.ballots.save() self.update() | 794ff6b34413bf5aed81fecb6828ed21281f9550 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13828/794ff6b34413bf5aed81fecb6828ed21281f9550/vote.py |
b=(unicode(self.ui.prenom2.text()),">",unicode(self.ui.prenom1.text()),1) | b=(unicode(self.ui.prenom2.text()),u">",unicode(self.ui.prenom1.text()),1) | def count_ballot_and_update(self,win): if win == 0: b=(unicode(self.ui.prenom1.text()),"=",unicode(self.ui.prenom2.text()),1) elif win==1: b=(unicode(self.ui.prenom1.text()),">",unicode(self.ui.prenom2.text()),1) elif win==2: b=(unicode(self.ui.prenom2.text()),">",unicode(self.ui.prenom1.text()),1) self.ballots.add(b) self.ballots.save() self.update() | 794ff6b34413bf5aed81fecb6828ed21281f9550 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13828/794ff6b34413bf5aed81fecb6828ed21281f9550/vote.py |
base.__name__, | base.__plain_name__, | def __new__(meta_class, class_name, bases, class_dict, **kw_arguments): """ Create a new type object, for example through a 'class' statement. Behaves like a class method and is called before __init__(). """ if kw_arguments: # Assigning values to the parameters means specializing the # template. Therefore, derive a subclass from this meta-class # and make it create the actual type object. specialized_meta_class = meta_class.__specialize( kw_arguments ) # Base classes must have the same specialized meta-class. specialized_bases = [] for base in bases: if base.__class__ is meta_class: specialized_bases.append( specialized_meta_class.__new__( specialized_meta_class, base.__name__, base.__bases__, base.__dict__ ) ) else: specialized_bases.append( base ) return specialized_meta_class.__new__( specialized_meta_class, class_name, tuple( specialized_bases ), class_dict ) else: # No specialization. Create a type object. extended_name = meta_class.__template_name( class_name, meta_class.__parameters__, meta_class.__parameter_map__ ) extended_dict = meta_class.__parameter_map__.copy() extended_dict.update( class_dict ) extended_dict[ "__plain_name__" ] = class_name return type.__new__( meta_class, extended_name, bases, extended_dict ) | ba5c445a8609ca5bf441edcaf37518cacef2ed40 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/ba5c445a8609ca5bf441edcaf37518cacef2ed40/types.py |
@param[in] stats The @c pstats.Stats compatible object whose data is to be represented as the new CallGraph. | @param stats The @c pstats.Stats compatible object whose data is to be represented as the new CallGraph. | def __init__(self, stats): """ Constructs a CallGraph from the given @p stats object. @param[in] stats The @c pstats.Stats compatible object whose data is to be represented as the new CallGraph. """ # Function -> ( Outgoing Calls, Incoming Calls ) self.__functions = {} # Call -> ( Calling Function, Called Function ) self.__calls = {} # Indexes to look up Functions self.__fln_index = {} # (filename, line number, name) -> Function self.__namespace_index = {} # namespace name -> set of Functions self.add(stats) | 472ab6914163cde4ce9c20ac96cd86b1a7236c0a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/472ab6914163cde4ce9c20ac96cd86b1a7236c0a/callgraph.py |
def __bool__(self): """ Test whether the element is non-zero: return @c True if, and only if, it is non-zero. Otherwise return @c False. Implicit conversions to boolean (truth) values use this method, for example when @c x is an element of a Field: @code if x: do_something() @endcode @exception NotImplementedError if this method is called; subclasses must implement this operation. """ raise NotImplementedError def __eq__(self, other): """ Test whether another element @p other is equal to @p self; return @c True if that is the case. The infix operator @c == calls this method, for example: @code if self == other: do_something() @endcode @exception NotImplementedError if this method is called; subclasses must implement this operation. """ raise NotImplementedError def __add__(self, other): """ Return the sum of @p self and @p other. The infix operator @c + calls this method if @p self is the left summand, for example: @code result = self + other @endcode @exception NotImplementedError if this method is called; subclasses must implement this operation. """ raise NotImplementedError def __neg__(self): """ Return the additive inverse of @p self. The unary minus operator @c -x calls this method, for example: @code negated = -self @endcode @exception NotImplementedError if this method is called; subclasses must implement this operation. """ raise NotImplementedError def __mul__(self, other): """ Return the product of @p self and @p other. The infix operator @c + calls this method if @p self is the left factor, for example: @code result = self * other @endcode @exception NotImplementedError if this method is called; subclasses must implement this operation. """ raise NotImplementedError def multiplicative_inverse(self): """ Return the multiplicative inverse of @p self. @exception NotImplementedError if this method is called; subclasses must implement this operation. """ raise NotImplementedError | def __bool__(self): """ Test whether the element is non-zero: return @c True if, and only if, it is non-zero. Otherwise return @c False. Implicit conversions to boolean (truth) values use this method, for example when @c x is an element of a Field: @code if x: do_something() @endcode @exception NotImplementedError if this method is called; subclasses must implement this operation. """ raise NotImplementedError | 5cdde871f8683be9e30f8752b360f472c96a7f79 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/5cdde871f8683be9e30f8752b360f472c96a7f79/__init__.py |
|
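The rows above and below quote an abstract field-element interface whose docstrings spell out the operator protocol (__bool__, __eq__, __add__, __neg__, __mul__, multiplicative_inverse). As a reading aid, here is a minimal sketch of a concrete type satisfying that protocol; the class name Mod7 and the hard-coded prime modulus are illustrative assumptions, not names taken from these rows.

    # Sketch only: integers modulo a small, hard-coded prime.
    class Mod7:
        P = 7  # assumption: a fixed prime modulus, for brevity

        def __init__(self, value):
            # int(value) also accepts another Mod7 via __int__ below.
            self.value = int(value) % self.P

        def __bool__(self):
            # Non-zero test, as the interface requires.
            return self.value != 0

        def __int__(self):
            return self.value

        def __eq__(self, other):
            return self.value == Mod7(other).value

        def __add__(self, other):
            return Mod7(self.value + Mod7(other).value)

        def __neg__(self):
            return Mod7(-self.value)

        def __mul__(self, other):
            return Mod7(self.value * Mod7(other).value)

        def multiplicative_inverse(self):
            if not self:
                raise ZeroDivisionError
            # Fermat's little theorem: a**(p-2) inverts a modulo a prime p.
            return Mod7(pow(self.value, self.P - 2, self.P))

With these six methods in place, the generic __sub__ quoted in a later row (implemented as self + (-other)) works unchanged.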
method, for example: | method; for example: | def __neq__(self, other): """ Test whether another element @p other is different from @p self; return @c True if that is the case. The infix operator @c != calls this method, for example: @code if self != other: do_something() @endcode """ return not self.__eq__( other ) | 5cdde871f8683be9e30f8752b360f472c96a7f79 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/5cdde871f8683be9e30f8752b360f472c96a7f79/__init__.py |
calls this method if @p self is the minuend (left element), for example: | calls this method if @p self is the minuend (left element); for example: | def __sub__(self, other): """ Return the difference of @p self and @p other. The infix operator @c - calls this method if @p self is the minuend (left element), for example: @code result = self - other @endcode """ return self.__add__( -other ) | 5cdde871f8683be9e30f8752b360f472c96a7f79 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/5cdde871f8683be9e30f8752b360f472c96a7f79/__init__.py |
calls this method if @p self is the dividend, for example: | calls this method if @p self is the dividend; for example: | def __truediv__(self, other): """ Return the quotient of @p self and @p other. The infix operator @c / calls this method if @p self is the dividend, for example: @code result = self / other @endcode @exception ZeroDivisionError if @p other is zero. @exception TypeError if @p other lacks the multiplicative_inverse() method and cannot be cast to @p self's class. """ if not other: raise ZeroDivisionError try: other = self.__class__(other) return self.__mul__( other.multiplicative_inverse() ) except TypeError: return NotImplemented | 5cdde871f8683be9e30f8752b360f472c96a7f79 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/5cdde871f8683be9e30f8752b360f472c96a7f79/__init__.py |
print( "platform: {0}".format( platform.platform() ), file = timing_file ) print( "python: {0}".format( platform.python_version() ), file = timing_file ) print( "wall time (s): {0}".format( wall_time ), file = timing_file ) print( "user time (s): {0}".format( user_time ), file = timing_file ) print( "sys time (s): {0}".format( sys_time ), file = timing_file ) print( "cpu time (s): {0}".format( cpu_time), file = timing_file ) print( "max memory (kB): {0}".format( max_rss ), file = timing_file ) | info = [ "node: {0}".format( platform.node() ), "platform: {0}".format( platform.platform() ), "python: {0}".format( platform.python_version() ), "date (Y/M/D h:m:s): {0}".format( datetime.now().strftime("%Y/%m/%d %H:%M:%S") ), "wall time (s): {0}".format( wall_time ), "user time (s): {0}".format( user_time ), "sys time (s): {0}".format( sys_time ), "cpu time (s): {0}".format( cpu_time), "max memory (kB): {0}".format( max_rss ), ] | def dump_data(self, extra_information = {}): if self.is_running(): self.stop() # Yes, this is a race condition. self.__profile_file.close() self.__profile.dump_stats( self.__profile_file.name ) | 8c3213d1cd42162a32b568732093724083e1b2e4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/8c3213d1cd42162a32b568732093724083e1b2e4/running.py |
print( "{0}: {1}".format( key, value ), file = timing_file ) | info.append( "{0}: {1}".format( key, value ) ) print( "\n".join( info ), file=timing_file ) | def dump_data(self, extra_information = {}): if self.is_running(): self.stop() # Yes, this is a race condition. self.__profile_file.close() self.__profile.dump_stats( self.__profile_file.name ) | 8c3213d1cd42162a32b568732093724083e1b2e4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/8c3213d1cd42162a32b568732093724083e1b2e4/running.py |
while line and not line.strip() and line.strip().startswith( " | while line and (not line.strip() or line.strip().startswith( " | def __iter__(self): # Register: increase the number of parsers with self.__lock() as data: parsers, current_offset, current_line = data self.__update( parsers + 1, current_offset, current_line ) # Iterate until the file ends line = self.__file.readline() while line: with self.__lock() as data: parsers, current_offset, current_line = data self.__file.seek( current_offset ) line = self.__file.readline() current_line += 1 | 8c3213d1cd42162a32b568732093724083e1b2e4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/8c3213d1cd42162a32b568732093724083e1b2e4/running.py |
yield current_line, tuple( line.split( self.__separator ) ) | yield current_line, tuple( line.strip().split( self.__separator ) ) | def __iter__(self): # Register: increase the number of parsers with self.__lock() as data: parsers, current_offset, current_line = data self.__update( parsers + 1, current_offset, current_line ) # Iterate until the file ends line = self.__file.readline() while line: with self.__lock() as data: parsers, current_offset, current_line = data self.__file.seek( current_offset ) line = self.__file.readline() current_line += 1 | 8c3213d1cd42162a32b568732093724083e1b2e4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/8c3213d1cd42162a32b568732093724083e1b2e4/running.py |
self.__input = [ ( "<stdin>", [ (0, tuple( arguments ) ) ] ) ] | self.__input = [] if arguments: self.__input.append( ( "<stdin>", [ (0, tuple( arguments ) ) ] ) ) | def __init__(self, algorithm, arguments=sys.argv[1:], algorithm_version="<unknown>" ): self.__algorithm = algorithm self.__algorithm_version = algorithm_version options, arguments = self._parse_arguments( arguments, algorithm_version ) # __input is a list of pairs (<name>, <iterable>); # <iterable> is expected to return pairs (<item_number>, <item>). # See run(). self.__input = [ ( "<stdin>", [ (0, tuple( arguments ) ) ] ) ] # Fail early: immediately try to open the file if options.input_file: input_parser = ParallelParser( options.input_file ) self.__input.append( ( options.input_file, input_parser ) ) # Initialize the remaining attributes. self._open_output( options.output_file ) | 8c3213d1cd42162a32b568732093724083e1b2e4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/8c3213d1cd42162a32b568732093724083e1b2e4/running.py |
modulo_primes = greedy_prime_factors( | torsion_primes = greedy_prime_factors( | def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range ) | 7cc864441f472f2e8b01251a88abb79d737c24ac /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/7cc864441f472f2e8b01251a88abb79d737c24ac/reduced_computation_schoof.py |
if 2 in modulo_primes: | if 2 in torsion_primes: | def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range ) | 7cc864441f472f2e8b01251a88abb79d737c24ac /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/7cc864441f472f2e8b01251a88abb79d737c24ac/reduced_computation_schoof.py |
modulo_primes.remove( 2 ) | torsion_primes.remove( 2 ) | def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range ) | 7cc864441f472f2e8b01251a88abb79d737c24ac /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/7cc864441f472f2e8b01251a88abb79d737c24ac/reduced_computation_schoof.py |
for prime in modulo_primes: | for prime in torsion_primes: | def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllpiticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range ) | 7cc864441f472f2e8b01251a88abb79d737c24ac /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/7cc864441f472f2e8b01251a88abb79d737c24ac/reduced_computation_schoof.py |
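The frobenius_trace context quoted above collects one trace congruence per small prime and merges them with solve_congruence_equations before picking a representative in the Hasse range. Below is a minimal sketch of that merging step via the Chinese Remainder Theorem, assuming pairwise-coprime moduli; the name crt is hypothetical, and pow(m, -1, n) requires Python 3.8+.

    def crt(congruences):
        """Combine (residue, modulus) pairs with pairwise-coprime moduli."""
        x, m = 0, 1
        for r, n in congruences:
            # Choose t so that x + m*t is congruent to r modulo n.
            t = ((r - x) * pow(m, -1, n)) % n
            x, m = x + m * t, m * n
        return x % m, m

    # crt([(2, 3), (3, 5)]) == (8, 15): 8 % 3 == 2 and 8 % 5 == 3.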
def frobenius_trace_mod_l(torsion_group): """ Compute the trace of the Frobenius endomorphism modulo @f$ l @f$, where @f$ l @f$ is the torsion of @p torsion_group. The function guesses candidates and verifies whether the function that results from applying the characteristic polynomial @f$ \chi_\phi @f$ to @f$ \phi @f$ maps every point in the @p torsion_group onto the point at infinity. @note A torsion of 2 requires multivariate polynomial arithmetic, which is unavailable. Therefore @f$ l @f$ must be greater than 2. Use frobenius_trace_mod_2() to get the trace modulo 2. @return The congruence class of the trace of the Frobenius endomorphism. This is an element of @c QuotientRing( Integers, l ). """ assert torsion_group.torsion() > 2, \ "torsion 2 requires multivariate polynomial arithmetic" torsion_quotient_ring = QuotientRing( Integers, torsion_group.torsion() ) field_size = torsion_group.curve().field().size() # FIXME: Technically, there could be several points so we have to filter # the one candidate that worked for all points in the end. for point in torsion_group.elements(): frobenius_point = frobenius( point, field_size ) frobenius2_point = frobenius( frobenius_point, field_size ) determinant_point = ( field_size % torsion_group.torsion() ) * point point_sum = frobenius2_point + determinant_point if point_sum.is_infinite(): return torsion_quotient_ring( 0 ) trace_point = frobenius_point for trace_candidate in range( 1, (torsion_group.torsion()+1) // 2 ): if point_sum.x() == trace_point.x(): if point_sum.y() == trace_point.y(): return torsion_quotient_ring( trace_candidate ) else: return torsion_quotient_ring( -trace_candidate ) else: trace_point += frobenius_point message = "Frobenius equation held for no trace candidate" raise ArithmeticError( message ) | 7cc864441f472f2e8b01251a88abb79d737c24ac /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/7cc864441f472f2e8b01251a88abb79d737c24ac/reduced_computation_schoof.py |
||
def greedy_prime_factors(n, shunned=0): """ Return a list of the first primes whose product is greater than, or equal to @p n, but do not use @p shunned. For example, if @p n is 14, then the returned list will consist of 3 and 5, but not 2, because 3 times 5 is greater than 14. The function behaves like inverse_primorial() except that it removes unnecessary smaller primes. @note Canceling of unnecessary primes follows a greedy algorithm. Therefore the choice of primes might be suboptimal; perfect choice, however, is an NP-complete problem (KNAPSACK). @note This function uses primes_range() to obtain a list of primes. See the notes there for use case limitations. """ primes = primes_range( 2, n+1 ) # Find the smallest product of primes that is at least n product = 1 for index, prime in enumerate( primes ): if prime != shunned: product *= prime if product >= n: break # Throw away excess primes primes = primes[ : index+1 ] if shunned in primes: primes.remove( shunned ) # Try to cancel unnecessary primes, largest first. # (This greedy search is not optimal; however, we did not set out to solve # the KNAPSACK problem, did we?) for index, prime in enumerate( reversed( primes ) ): canceled_product = product / prime if canceled_product >= n: product = canceled_product primes[ -(index+1) ] = 0 return list( filter( None, primes ) ) | 7cc864441f472f2e8b01251a88abb79d737c24ac /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10264/7cc864441f472f2e8b01251a88abb79d737c24ac/reduced_computation_schoof.py |
||
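The greedy_prime_factors docstring above describes the selection strategy: take successive primes until their product reaches n, skip the shunned prime, then greedily cancel unneeded primes starting from the largest. A self-contained sketch of that idea, with illustrative names not taken from the module:

    def greedy_primes(n, shunned=0):
        all_primes, chosen, product, candidate = [], [], 1, 2
        while product < n:
            if all(candidate % p for p in all_primes):  # trial-division primality
                all_primes.append(candidate)
                if candidate != shunned:
                    chosen.append(candidate)
                    product *= candidate
            candidate += 1
        # Greedy cancellation: drop the largest primes whose removal still
        # leaves the product >= n (suboptimal in general, as the row notes).
        for prime in reversed(chosen[:]):
            if product // prime >= n:
                product //= prime
                chosen.remove(prime)
        return chosen

    # greedy_primes(14) == [3, 5], matching the docstring's own example.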
if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) | if not os.path.islink(line) and accesscontrollist.hasacl(line) and not options.ignoreacl: err = "filetoversion has a 'deny' in ACL permissions (ls -lde %s: %s) \n \ This program is currently not clever enough to check if you have permission to move/delete this file. \n \ To avoid this problem remove deny permissions from the access control entries \n \ or rerun this command with --ignoreacl" % (line, accesscontrollist.getacl(line)) logging.warn(err) elif not os.path.islink(line): acl = None | def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise | 89b492e4f795b5a957e6231e2a08d7669894154f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10713/89b492e4f795b5a957e6231e2a08d7669894154f/symlinkrepos.py |
acl = None | def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise | 89b492e4f795b5a957e6231e2a08d7669894154f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10713/89b492e4f795b5a957e6231e2a08d7669894154f/symlinkrepos.py |
|
acl = removeacl(line) util.move(line, line+".beforesyncher") if acl is not None: accesscontrollist.setacl(line, acl) | acl = accesscontrollist.removeacl(line) util.move(line, line+"-beforesyncher") | def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise | 89b492e4f795b5a957e6231e2a08d7669894154f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10713/89b492e4f795b5a957e6231e2a08d7669894154f/symlinkrepos.py |
exc = sys.exc_info()[1] raise exc | exc = sys.exc_info()[1] raise exc | def parseString( self, instring, parseAll=False ): """Execute the parse expression with the given string. This is the main interface to the client code, once the complete expression has been built. | a340e95bb0e330881e7c570990ddda53290b256b /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12364/a340e95bb0e330881e7c570990ddda53290b256b/pyparsing_py3.py |
if self.val.type.code == gdb.TYPE_CODE_RANGE: | if self.val['code'] == gdb.TYPE_CODE_RANGE: | def to_string(self): """Return a pretty-printed image of our main_type. """ fields = [] fields.append("name = %s" % self.val['name']) fields.append("tag_name = %s" % self.val['tag_name']) fields.append("code = %s" % self.val['code']) fields.append("flags = [%s]" % self.flags_to_string()) fields.append("owner = %s" % self.owner_to_string()) fields.append("target_type = %s" % self.val['target_type']) fields.append("vptr_basetype = %s" % self.val['vptr_basetype']) if self.val['nfields'] > 0: for fieldno in range(self.val['nfields']): fields.append(self.struct_field_img(fieldno)) if self.val.type.code == gdb.TYPE_CODE_RANGE: fields.append(self.bounds_img()) fields.append(self.type_specific_img()) | 5a7d297248beb0d4e8ca816a0d3e21ed7c7d709f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9879/5a7d297248beb0d4e8ca816a0d3e21ed7c7d709f/gdb-gdb.py |
if self.val.type.code == gdb.TYPE_CODE_RANGE: | if self.val['code'] == gdb.TYPE_CODE_RANGE: | def to_string(self): """Return a pretty-printed image of our main_type. """ fields = [] fields.append("name = %s" % self.val['name']) fields.append("tag_name = %s" % self.val['tag_name']) fields.append("code = %s" % self.val['code']) fields.append("flags = [%s]" % self.flags_to_string()) fields.append("owner = %s" % self.owner_to_string()) fields.append("target_type = %s" % self.val['target_type']) fields.append("vptr_basetype = %s" % self.val['vptr_basetype']) if self.val['nfields'] > 0: for fieldno in range(self.val['nfields']): fields.append(self.struct_field_img(fieldno)) if self.val.type.code == gdb.TYPE_CODE_RANGE: fields.append(self.bounds_img()) fields.append(self.type_specific_img()) | 9bf91e0a3545162e68b84780a6cfd2325fa5fe03 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9879/9bf91e0a3545162e68b84780a6cfd2325fa5fe03/gdb-gdb.py |
except: pass | except: pass | def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2==nfoName: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) #nfoText = t.read() nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass #studio try: metadata.studio = nfoXML.findall("studio")[0].text except: pass #duration try: metadata.duration = float(nfoXML.xpath("runtime")[0].text) except: pass #genre, cant see mulltiple only sees string not seperate genres metadata.genres.clear() try: tempgenre=nfoXML.xpath('./genre')[0].text genres=tempgenre.split("/") Log(genres) if genres != "": metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) except: pass | e2428c28f2ce50df527813f9898759870d32678e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/e2428c28f2ce50df527813f9898759870d32678e/__init__.py |
m = re.search('(tt[0-9]+)', metadata.guid) if m: id = m.groups(1)[0] | m = re.search('(tt[0-9]+)', metadata.guid) if m: id = m.groups(1)[0] | def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2==nfoName: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) #nfoText = t.read() nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass #studio try: metadata.studio = nfoXML.findall("studio")[0].text except: pass #duration try: metadata.duration = float(nfoXML.xpath("runtime")[0].text) except: pass #genre, cant see mulltiple only sees string not seperate genres metadata.genres.clear() try: tempgenre=nfoXML.xpath('./genre')[0].text genres=tempgenre.split("/") Log(genres) if genres != "": metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) except: pass | e2428c28f2ce50df527813f9898759870d32678e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/e2428c28f2ce50df527813f9898759870d32678e/__init__.py |
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass #studio try: metadata.studio = nfoXML.findall("studio")[0].text except: pass #duration try: metadata.duration = float(nfoXML.xpath("runtime")[0].text) except: pass Log(metadata.id) #genre, cant see mulltiple only sees string not seperate genres metadata.genres.clear() try: tempgenre=nfoXML.xpath('./genre')[0].text genres=tempgenre.split("/") except: pass Log(genres) if genres != "": metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) #actors metadata.roles.clear() for actor in nfoXML.findall('./actor'): role = metadata.roles.new() try: role.role = actor.xpath("role")[0].text except: pass try: role.actor = actor.xpath("name")[0].text except: pass try: role.photo = actor.xpath("thumb")[0].text except: pass if role.photo != 'None': data = HTTP.Request(actor.xpath("thumb")[0].text) Log('Added Thumbnail for: ' + role.actor) name = metadata.title if name not in metadata.posters: metadata.posters[name] = Proxy.Media(data) break else: continue Log("++++++++++++++++++++++++") Log("Movie nfo Information") Log("++++++++++++++++++++++++") Log("Title: " + str(metadata.title)) Log("id: " + str(metadata.guid)) Log("Summary: " + str(metadata.summary)) Log("Year: " + str(metadata.year)) Log("IMDB rating: " + str(metadata.rating)) Log("Content Rating: " + str(metadata.content_rating)) Log("Director " + str(metadata.directors)) Log("Studio: " + str(metadata.studio)) Log("Duration: " + str(metadata.duration)) # Log("Actors") # for r in metadata.roles: # Log("Actor: " + r.actor + " | Role: " + r.role) Log("Genres") for r in metadata.genres: Log("genres: " + r) Log(metadata.id) Log("++++++++++++++++++++++++") return id, metadata | 7766e44ff918621f057e618978f8bd8f87dc2542 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/7766e44ff918621f057e618978f8bd8f87dc2542/__init__.py |
||
<<<<<<< HEAD | def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass | 4e20cce3a10111505cec7c4e88b992777970c7bb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/4e20cce3a10111505cec7c4e88b992777970c7bb/__init__.py |
|
======= >>>>>>> e5a5e37cb95fd2b91a757284cc694e01ea9da987 | def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass | 4e20cce3a10111505cec7c4e88b992777970c7bb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/4e20cce3a10111505cec7c4e88b992777970c7bb/__init__.py |
|
<<<<<<< HEAD | def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass | 4e20cce3a10111505cec7c4e88b992777970c7bb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/4e20cce3a10111505cec7c4e88b992777970c7bb/__init__.py |
|
======= name="Nfo_" + media.name results.Append(MetadataSearchResult(id=media.id,name=name,year=3000,lang=lang,score=100)) >>>>>>> e5a5e37cb95fd2b91a757284cc694e01ea9da987 | def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass | 4e20cce3a10111505cec7c4e88b992777970c7bb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/4e20cce3a10111505cec7c4e88b992777970c7bb/__init__.py |
|
<<<<<<< HEAD | def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director | 4e20cce3a10111505cec7c4e88b992777970c7bb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/4e20cce3a10111505cec7c4e88b992777970c7bb/__init__.py |
|
======= metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass >>>>>>> e5a5e37cb95fd2b91a757284cc694e01ea9da987 | def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director | 4e20cce3a10111505cec7c4e88b992777970c7bb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/4e20cce3a10111505cec7c4e88b992777970c7bb/__init__.py |
|
<<<<<<< HEAD | def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director | 4e20cce3a10111505cec7c4e88b992777970c7bb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/4e20cce3a10111505cec7c4e88b992777970c7bb/__init__.py |
|
======= metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) metadata.roles.clear() for actor in nfoXML.findall('./actor'): role = metadata.roles.new() try: role.role = actor.xpath("role")[0].text except: pass try: role.actor = actor.xpath("name")[0].text except: pass try: role.photo = actor.xpath("thumb")[0].text except: pass if role.photo != 'None': data = HTTP.Request(actor.xpath("thumb")[0].text) Log('Added Thumbnail for: ' + role.actor) name = metadata.title if name not in metadata.posters: metadata.posters[name] = Proxy.Media(data) break else: continue Log("++++++++++++++++++++++++") Log("Movie nfo Information") Log("++++++++++++++++++++++++") Log("Title: " + str(metadata.title)) Log("id: " + str(metadata.guid)) Log("Summary: " + str(metadata.summary)) Log("Year: " + str(metadata.year)) Log("IMDB rating: " + str(metadata.rating)) Log("Content Rating: " + str(metadata.content_rating)) Log("Director " + str(metadata.directors)) Log("Studio: " + str(metadata.studio)) Log("Duration: " + str(metadata.duration)) Log("Genres") for r in metadata.genres: Log("genres: " + r) Log(metadata.id) Log("++++++++++++++++++++++++") return id, metadata >>>>>>> e5a5e37cb95fd2b91a757284cc694e01ea9da987 | def grabPoster(pUrl=thumb.text, i=i): posterUrl = pUrl Log("Adding: " + pUrl) thumbpic = HTTP.Request(pUrl) metadata.posters[posterUrl] = Proxy.Preview(thumbpic, sort_order = i) | 4e20cce3a10111505cec7c4e88b992777970c7bb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14874/4e20cce3a10111505cec7c4e88b992777970c7bb/__init__.py |
|
def system_load(): loads = open('/proc/loadavg').read().split(" ") allusers = users allusers.Select(allusers.FILTER_STANDARD) nbusers = len(allusers.filtered_users) cxusers = len(Popen('who', shell = True, stdin = PIPE, stdout = PIPE, close_fds = True).stdout.read().split('\n')) if cxusers > 1: s_users = 's' else: s_users = '' uptime_sec = int(float(open('/proc/uptime').read().split(" ")[0])) uptime_min = 0 uptime_hour = 0 uptime_day = 0 uptime_year = 0 s_year = '' s_day = '' s_hour = '' s_sec = '' s_min = '' uptime_string = '' if uptime_sec > 60: uptime_min = uptime_sec / 60 uptime_sec -= (uptime_min * 60) if uptime_min > 60: uptime_hour = uptime_min / 60 uptime_min -= (uptime_hour * 60) if uptime_hour > 24: uptime_day = uptime_hour / 24 uptime_hour -= (uptime_day * 24) if uptime_day > 365: uptime_year = uptime_day / 365 uptime_day -= (uptime_year * 365) if uptime_year > 1: s_year = 's' uptime_string += _('%d year%s, ') % (uptime_year, s_year) if uptime_day > 1: s_day = 's' uptime_string += _('%d day%s, ') % (uptime_day, s_day) if uptime_hour > 1: s_hour = 's' uptime_string += _('%d hour%s, ') % (uptime_hour, s_hour) if uptime_min > 1: s_min = 's' uptime_string += _('%d min%s, ') % (uptime_min, s_min) if uptime_sec > 1: s_sec = 's' uptime_string += _('%d sec%s') % (uptime_sec, s_sec) return _('''Up and running since <strong>%s</strong>.<br /><br /> | 16349ceb256a4fadc58fa051cf7cc6d978d7b11e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/16349ceb256a4fadc58fa051cf7cc6d978d7b11e/base.py |
||
uptime_string += _('%d sec%s') % (uptime_sec, s_sec) | uptime_string += _('%d sec%s') % (uptime_sec, s_sec) | def system_load(): loads = open('/proc/loadavg').read().split(" ") allusers = users allusers.Select(allusers.FILTER_STANDARD) nbusers = len(allusers.filtered_users) cxusers = len(Popen('who', shell = True, stdin = PIPE, stdout = PIPE, close_fds = True).stdout.read().split('\n')) if cxusers > 1: s_users = 's' else: s_users = '' uptime_sec = int(float(open('/proc/uptime').read().split(" ")[0])) uptime_min = 0 uptime_hour = 0 uptime_day = 0 uptime_year = 0 s_year = '' s_day = '' s_hour = '' s_sec = '' s_min = '' uptime_string = '' if uptime_sec > 60: uptime_min = uptime_sec / 60 uptime_sec -= (uptime_min * 60) if uptime_min > 60: uptime_hour = uptime_min / 60 uptime_min -= (uptime_hour * 60) if uptime_hour > 24: uptime_day = uptime_hour / 24 uptime_hour -= (uptime_day * 24) if uptime_day > 365: uptime_year = uptime_day / 365 uptime_day -= (uptime_year * 365) if uptime_year > 1: s_year = 's' uptime_string += _('%d year%s, ') % (uptime_year, s_year) if uptime_day > 1: s_day = 's' uptime_string += _('%d day%s, ') % (uptime_day, s_day) if uptime_hour > 1: s_hour = 's' uptime_string += _('%d hour%s, ') % (uptime_hour, s_hour) if uptime_min > 1: s_min = 's' uptime_string += _('%d min%s, ') % (uptime_min, s_min) if uptime_sec > 1: s_sec = 's' uptime_string += _('%d sec%s') % (uptime_sec, s_sec) return _('''Up and running since <strong>%s</strong>.<br /><br /> | 16349ceb256a4fadc58fa051cf7cc6d978d7b11e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/16349ceb256a4fadc58fa051cf7cc6d978d7b11e/base.py |
1, 5, and 15 last minutes load average: <strong>%s</strong>, %s, %s''') % (uptime_string, nbusers, cxusers, loads[0], loads[1], loads[2]) | 1, 5, and 15 last minutes load average: <strong>%s</strong>, %s, %s''') % (uptime_string, nbusers, cxusers, loads[0], loads[1], loads[2]) | def system_load(): loads = open('/proc/loadavg').read().split(" ") allusers = users allusers.Select(allusers.FILTER_STANDARD) nbusers = len(allusers.filtered_users) cxusers = len(Popen('who', shell = True, stdin = PIPE, stdout = PIPE, close_fds = True).stdout.read().split('\n')) if cxusers > 1: s_users = 's' else: s_users = '' uptime_sec = int(float(open('/proc/uptime').read().split(" ")[0])) uptime_min = 0 uptime_hour = 0 uptime_day = 0 uptime_year = 0 s_year = '' s_day = '' s_hour = '' s_sec = '' s_min = '' uptime_string = '' if uptime_sec > 60: uptime_min = uptime_sec / 60 uptime_sec -= (uptime_min * 60) if uptime_min > 60: uptime_hour = uptime_min / 60 uptime_min -= (uptime_hour * 60) if uptime_hour > 24: uptime_day = uptime_hour / 24 uptime_hour -= (uptime_day * 24) if uptime_day > 365: uptime_year = uptime_day / 365 uptime_day -= (uptime_year * 365) if uptime_year > 1: s_year = 's' uptime_string += _('%d year%s, ') % (uptime_year, s_year) if uptime_day > 1: s_day = 's' uptime_string += _('%d day%s, ') % (uptime_day, s_day) if uptime_hour > 1: s_hour = 's' uptime_string += _('%d hour%s, ') % (uptime_hour, s_hour) if uptime_min > 1: s_min = 's' uptime_string += _('%d min%s, ') % (uptime_min, s_min) if uptime_sec > 1: s_sec = 's' uptime_string += _('%d sec%s') % (uptime_sec, s_sec) return _('''Up and running since <strong>%s</strong>.<br /><br /> | 16349ceb256a4fadc58fa051cf7cc6d978d7b11e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/16349ceb256a4fadc58fa051cf7cc6d978d7b11e/base.py |
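The system_load rows above break an uptime in seconds into years, days, hours, minutes and seconds by repeated division and subtraction. The same breakdown reads more compactly with divmod; this sketch (the name split_uptime is hypothetical) is a simplification of, not a drop-in for, the quoted code:

    def split_uptime(seconds):
        minutes, seconds = divmod(seconds, 60)
        hours, minutes = divmod(minutes, 60)
        days, hours = divmod(hours, 24)
        years, days = divmod(days, 365)
        return years, days, hours, minutes, seconds

    # split_uptime(400000) == (0, 4, 15, 6, 40)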
return _(''' | if mem['SwapTotal'] == 0: swap_message = _("no virtual memory installed.") else: swap_message = \ _("Virtual memory: %.2f Gb total, <strong>%.0f%% free<strong>.") % \ (mem['SwapTotal'], (mem['SwapFree'] * 100.0 / mem['SwapTotal'])) return (_(''' | def compute_mem(line, x): #logging.debug(line[0:-1] + " -> " + re.split('\W+', line)[1]) | 16349ceb256a4fadc58fa051cf7cc6d978d7b11e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/16349ceb256a4fadc58fa051cf7cc6d978d7b11e/base.py |
Virtual memory: %.2f Gb total, <strong>%.0f%% free<strong>. ''') % (s, cpus, model, mem['MemTotal'], (mem['Inactive'] + mem['Active']), mem['Cached'], mem['Buffers'], mem['SwapTotal'], (mem['SwapFree'] * 100.0 / mem['SwapTotal']) ) | %s''') % (s, cpus, model, mem['MemTotal'], (mem['Inactive'] + mem['Active']), mem['Cached'], mem['Buffers'], swap_message)) | def compute_mem(line, x): #logging.debug(line[0:-1] + " -> " + re.split('\W+', line)[1]) | 16349ceb256a4fadc58fa051cf7cc6d978d7b11e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/16349ceb256a4fadc58fa051cf7cc6d978d7b11e/base.py |
def index(uri, http_user): start = time.time() title = _("Server status") data = '<div id="banner">\n%s\n%s\n%s\n</div><!-- banner -->\n<div id="main">\n%s\n<div id="content">' % (w.backto(), w.metanav(http_user), w.menu(uri), ctxtnav()) data += '''<table> <tr> <td><h1>%s</h1><br />%s</td> <td><h1>%s</h1>%s</td> </tr> | 16349ceb256a4fadc58fa051cf7cc6d978d7b11e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/16349ceb256a4fadc58fa051cf7cc6d978d7b11e/base.py |
||
data = '<div id="banner">\n%s\n%s\n%s\n</div><!-- banner -->\n<div id="main">\n%s\n<div id="content">' % (w.backto(), w.metanav(http_user), w.menu(uri), ctxtnav()) | data = '<div id="banner">\n%s\n%s\n%s\n</div><!-- banner -->\n<div id="main">\n%s\n<div id="content">' % (w.backto(), w.metanav(http_user), w.menu(uri), ctxtnav()) | def index(uri, http_user): start = time.time() title = _("Server status") data = '<div id="banner">\n%s\n%s\n%s\n</div><!-- banner -->\n<div id="main">\n%s\n<div id="content">' % (w.backto(), w.metanav(http_user), w.menu(uri), ctxtnav()) data += '''<table> <tr> <td><h1>%s</h1><br />%s</td> <td><h1>%s</h1>%s</td> </tr> | 16349ceb256a4fadc58fa051cf7cc6d978d7b11e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/16349ceb256a4fadc58fa051cf7cc6d978d7b11e/base.py |
LicornMessage(data=text_message), | LicornMessage(data=text_message, channel=1), | def output(self, text_message): return current_thread().listener.process( LicornMessage(data=text_message), options.msgproc.getProxy()) | d5980ccaf85371bb2f2dd030582cd63c6e7979c5 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/d5980ccaf85371bb2f2dd030582cd63c6e7979c5/rwi.py |
self.reload() | self.reload(full=False) | def __init__(self, configuration): """ Create the user accounts list from the underlying system. """ | 8298bb4c166c10335fde7321145b88da300e88e1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/8298bb4c166c10335fde7321145b88da300e88e1/users.py |
def reload(self): | def reload(self, full=True): | def reload(self): """ Load (or reload) the data structures from the system data. """ | 8298bb4c166c10335fde7321145b88da300e88e1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/8298bb4c166c10335fde7321145b88da300e88e1/users.py |
users.reload() | def main(uri, http_user, sort = "login", order = "asc"): """ display all users in a nice HTML page. """ start = time.time() groups.reload() users.reload() # profiles.reload() u = users.users g = groups.groups p = profiles.profiles groups.Select(filters.PRIVILEGED) pri_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.RESPONSIBLE) rsp_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.GUEST) gst_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.STANDARD) std_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] accounts = {} ordered = {} totals = {} prof = {} for profile in p: prof[groups.name_to_gid(profile)] = p[profile] totals[p[profile]['name']] = 0 totals[_('Standard account')] = 0 title = _("User accounts") data = w.page_body_start(uri, http_user, ctxtnav, title) if order == "asc": reverseorder = "desc" else: reverseorder = "asc" data += '<table>\n <tr>\n' for (sortcolumn, sortname) in ( ("gecos", _("Full name")), ("login", _("Identifier")), ("profile", _("Profile")), ("locked", _("Locked")) ): if sortcolumn == sort: data += ''' <th><img src="/images/sort_%s.gif" alt="%s order image" />  <a href="/users/list/%s/%s" title="%s">%s</a> </th>\n''' % (order, order, sortcolumn, reverseorder, _("Click to sort in reverse order."), sortname) else: data += ''' <th><a href="/users/list/%s/asc" title="%s">%s</a></th>\n''' % (sortcolumn, _("Click to sort on this column."), sortname) data += ' </tr>\n' def html_build_compact(index, accounts = accounts): uid = ordered[index] login = u[uid]['login'] edit = (_('''<em>Click to edit current user account parameters:</em> <br /> UID: <strong>%d</strong><br /> GID: %d (primary group <strong>%s</strong>)<br /><br /> Groups: <strong>%s</strong><br /><br /> Privileges: <strong>%s</strong><br /><br /> Responsabilities: <strong>%s</strong><br /><br /> Invitations: <strong>%s</strong><br /><br /> ''') % ( uid, u[uid]['gidNumber'], g[u[uid]['gidNumber']]['name'], ", ".join(filter(lambda x: x in std_grps, u[uid]['groups'])), ", ".join(filter(lambda x: x in pri_grps, u[uid]['groups'])), ", ".join(filter(lambda x: x in rsp_grps, u[uid]['groups'])), ", ".join(filter( lambda x: x in gst_grps, u[uid]['groups'])))).replace( '<','<').replace('>','>') html_data = ''' <tr class="userdata"> <td class="paddedleft"> <a href="/users/edit/%s" title="%s" class="edit-entry">%s</a> </td> <td class="paddedright"> <a href="/users/edit/%s" title="%s" class="edit-entry">%s</a> </td> <td style="text-align:center;">%s</td> ''' % (login, edit, u[uid]['gecos'], login, edit, login, accounts[uid]['profile_name']) if u[uid]['locked']: html_data += ''' <td class="user_action_center"> <a href="/users/unlock/%s" title="%s"> <img src="/images/16x16/locked.png" alt="%s"/></a> </td> ''' % (login, _("Unlock password (re-grant access to machines)."), _("Remove account.")) else: html_data += ''' <td class="user_action_center"> <a href="/users/lock/%s" title="%s"> <img src="/images/16x16/unlocked.png" alt="%s"/></a> </td> ''' % (login, _("Lock password (revoke access to machines)."), _("Lock account.")) html_data += ''' <td class="user_action"> <a href="/users/skel/%s" title="%s" class="reapply-skel"> <span class="delete-entry"> </span></a> </td> <td class="user_action"> <a href="/users/delete/%s" title="%s" class="delete-entry"> <span class="delete-entry"> </span></a> </td> </tr> ''' % (login, _('''Reapply origin skel data in the personnal ''' '''directory of user. 
This is usefull''' ''' when user has lost icons, or modified too much his/her ''' '''desktop (menus, panels and so on). This will get all his/her desktop back.'''), login, _("Definitely remove account from the system.")) return html_data users.Select(filters.STANDARD) for uid in users.filtered_users: user = u[uid] login = user['login'] # we add the login to gecosValue and lockedValue to be sure to obtain # unique values. This prevents problems with empty or non-unique GECOS # and when sorting on locked status (accounts would be overwritten and # lost because sorting must be done on unique values). accounts[uid] = { 'login' : login, 'gecos' : user['gecos'] + login , 'locked' : str(user['locked']) + login } try: p = prof[user['gidNumber']]['name'] except KeyError: p = _("Standard account") accounts[uid]['profile'] = "%s %s" % ( p, login ) accounts[uid]['profile_name'] = p totals[p] += 1 # index on the column choosen for sorting, and keep trace of the uid # to find account data back after ordering. ordered[hlstr.validate_name(accounts[uid][sort])] = uid memberkeys = ordered.keys() memberkeys.sort() if order == "desc": memberkeys.reverse() data += ''.join(map(html_build_compact, memberkeys)) def print_totals(totals): output = "" for total in totals: if totals[total] != 0: output += ''' <tr class="list_total"> <td colspan="3" class="total_left">%s</td> <td colspan="3" class="total_right">%d</td> </tr> ''' % (_("number of <strong>%s</strong>:") % total, totals[total]) return output data += ''' <tr> <td colspan="6"> </td></tr> %s <tr class="list_total"> <td colspan="3" class="total_left">%s</td> <td colspan="3" class="total_right">%d</td> </tr> | 0ebcb08d8fdf6b58581b325dbdf0cbb7cb7b3771 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/0ebcb08d8fdf6b58581b325dbdf0cbb7cb7b3771/users.py |
|
self.current_target_object, self.args, self.kwargs | self.current_target, self.args, self.kwargs | def dump_status(self, long_output=False, precision=None): """ get detailed thread status. """ | a3db7d295a7a1a61d223bfc2474db829f8a19f66 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/a3db7d295a7a1a61d223bfc2474db829f8a19f66/threads.py
self.current_target_object) \ | self.current_target) \ | def dump_status(self, long_output=False, precision=None): """ get detailed thread status. """ | a3db7d295a7a1a61d223bfc2474db829f8a19f66 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/a3db7d295a7a1a61d223bfc2474db829f8a19f66/threads.py
command.extend([ "--skel", skel ]) | command.extend(["--skel", skel]) | def record(uri, http_user, name, skel=None, permissive=False, description=None, members_source = [], members_dest = [], resps_source = [], resps_dest = [], guests_source = [], guests_dest = [], record = None): """Record group changes.""" # web submit -> forget it del record title = _("Modifying group %s") % name data = '%s<h1>%s</h1>' % (w.backto(), title) command = [ 'sudo', 'mod', 'group', '--quiet', '--no-colors', '--name', name ] if skel: command.extend([ "--skel", skel ]) add_members = ','.join(__merge_multi_select(members_dest)) del_members = ','.join(__merge_multi_select(members_source)) add_resps = ','.join(__merge_multi_select(resps_dest)) del_resps = ','.join(__merge_multi_select(resps_source)) add_guests = ','.join(__merge_multi_select(guests_dest)) del_guests = ','.join(__merge_multi_select(guests_source)) for (var, cmd) in ( (add_members, "--add-users"), (del_members, "--del-users"), (add_resps, "--add-resps"), (del_resps, '--del-resps'), (add_guests, "--add-guests"), (del_guests, '--del-guests') ): if var != "": command.extend([ cmd, var ]) return (w.HTTP_TYPE_TEXT, w.page(title, data + w.run(command, uri, successfull_redirect = "/groups/list", err_msg = _('Failed to modify one or more parameter of group %s!') % \ name))) | d9449425cb5de89ee21ea727bbf57a04644d6cfc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/d9449425cb5de89ee21ea727bbf57a04644d6cfc/groups.py |
execute([ 'sudo', 'rm', '-rf', '%s/*' % configuration.home_backup_dir, '%s/*' % configuration.home_archive_dir ]) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi']) | for directory in ( configuration.home_backup_dir, configuration.home_archive_dir ): clean_dir_contents(directory) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi']) | def clean_system(): """ Remove all stuff to make the system clean, testsuite-wise.""" test_message('''cleaning system from previous runs.''') # delete them first in case of a previous failed testsuite run. # don't check exit codes or such, this will be done later. for argument in ( ['user', '''toto,tutu,tata,titi,test,utilisager.normal,''' \ '''test.responsibilly,utilicateur.accentue,user_test,''' \ '''grp-acl-user,utest_267,user_test2,user_test3,user_testsys,''' \ '''user_testsys2,user_testsys3''', '--no-archive'], ['profile', '''--group=utilisagers,responsibilisateurs,''' '''profil_test''', '--del-users', '--no-archive'], ['group', '''test_users_A,test_users_B,groupeA,B-Group_Test,''' \ '''groupe_a_skel,ACL_tests,MOD_tests,SYSTEM-test,SKEL-tests,''' \ '''ARCHIVES-test,group_test,group_testsys,group_test2,''' '''group_test3,GRP-ACL-test,gtest_267,group_testsys''' ], ['privilege', '--name=group_test' ] ): execute(DEL + argument) execute([ 'sudo', 'rm', '-rf', '%s/*' % configuration.home_backup_dir, '%s/*' % configuration.home_archive_dir ]) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi']) test_message('''system cleaned from previous testsuite runs.''') | f349e3342b6e7a4d6ed5a9284273477b19d7fb9e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/f349e3342b6e7a4d6ed5a9284273477b19d7fb9e/core.py |
ScenarioTest([ [ 'sudo', 'rm', '-vrf', '%s/*' % configuration.home_archive_dir ], | clean_dir_contents(configuration.home_archive_dir) ScenarioTest([ | def chk_acls_cmds(group, subdir=None): return [ 'sudo', 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names['plural'], group, '/%s' % subdir if subdir else '') ] | f349e3342b6e7a4d6ed5a9284273477b19d7fb9e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/f349e3342b6e7a4d6ed5a9284273477b19d7fb9e/core.py |
[ 'sudo', 'getfacl', '-R', configuration.home_archive_dir ], [ 'sudo', 'rm', '-vrf', '%s/*' % configuration.home_archive_dir ] | [ 'sudo', 'getfacl', '-R', configuration.home_archive_dir ] | def chk_acls_cmds(group, subdir=None): return [ 'sudo', 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names['plural'], group, '/%s' % subdir if subdir else '') ] | f349e3342b6e7a4d6ed5a9284273477b19d7fb9e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/f349e3342b6e7a4d6ed5a9284273477b19d7fb9e/core.py |
logging.warning( 'Adding a default profile on the system (this is mandatory).') | logging.warning('''Adding a default %s profile on the system ''' '''(this is mandatory).''' % styles.stylize(styles.ST_NAME, 'Users')) | def checkDefaultProfile(self): """If no profile exists on the system, create a default one with system group "users".""" | 3482744910e32cde6e9d054b800a2d84b6cd8199 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/3482744910e32cde6e9d054b800a2d84b6cd8199/profiles.py |
description='', profileShell=None, profileSkel=None, force_existing=False): | description='', profileShell=None, profileSkel=None, force_existing=False): | def AddProfile(self, name, group, profileQuota=1024, groups=[], description='', profileShell=None, profileSkel=None, force_existing=False): """ Add a user profile (self.groups is an instance of GroupsController and is needed to create the profile group). """ | 3482744910e32cde6e9d054b800a2d84b6cd8199 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/3482744910e32cde6e9d054b800a2d84b6cd8199/profiles.py |
lenghts[0], lenghts[1], lenghts[2], lenghts[3], lenghts[4], lenghts[5], lenghts[6], lenghts[7] | lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6], lengths[7] | def run(self): logging.progress("%s: thread running." % (self.name)) Thread.run(self) | 97fed721fd6cf0a03ac4998f18a51e7f5e89dfc2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/97fed721fd6cf0a03ac4998f18a51e7f5e89dfc2/inotifier.py |
if(lenghts[7] > 0): | if(lengths[7] > 0): | def run(self): logging.progress("%s: thread running." % (self.name)) Thread.run(self) | 97fed721fd6cf0a03ac4998f18a51e7f5e89dfc2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/97fed721fd6cf0a03ac4998f18a51e7f5e89dfc2/inotifier.py |
dest = list(user['groups'].copy()) | dest = list(user['groups'][:]) | def edit(uri, http_user, login): """Edit an user account, based on login.""" users.reload() groups.reload() # profiles.reload() title = _('Edit account %s') % login if protected_user(login): return w.forgery_error(title) data = w.page_body_start(uri, http_user, ctxtnav, title, False) try: user = users.users[users.login_to_uid(login)] try: profile = \ profiles.profiles[ groups.groups[user['gidNumber']]['name'] ]['name'] except KeyError: profile = _("Standard account") dbl_lists = {} for filter, titles, id in groups_filters_lists_ids: groups.Select(filter) dest = list(user['groups'].copy()) source = [ groups.groups[gid]['name'] \ for gid in groups.filtered_groups ] for current in dest[:]: try: source.remove(current) except ValueError: dest.remove(current) dest.sort() source.sort() dbl_lists[filter] = w.doubleListBox(titles, id, source, dest) form_name = "user_edit_form" data += '''<div id="edit_form"> | 7fe4b9d7097d5e3724f4e48b9ff9a25383843212 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/7fe4b9d7097d5e3724f4e48b9ff9a25383843212/users.py |
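Aside, for readers of this corpus: the users.py row above swaps list(user['groups'].copy()) for list(user['groups'][:]), presumably because Python 2 lists have no .copy() method (it arrived in Python 3.3), so slicing is the portable spelling. A tiny illustration with made-up data, not taken from the corpus:

# Hypothetical data, for illustration only.
groups = ['admins', 'remotessh']
dest = groups[:]                 # shallow copy; works on Python 2 and 3
dest.append('licorn-wmi')
assert groups == ['admins', 'remotessh']   # the original list is untouched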
count += 1 | def fork_wmi(opts, start_wmi = True): """ Start the Web Management Interface (fork it). """ # FIXME: implement start_wmi in argparser module. try: if os.fork() == 0: # FIXME: drop_privileges() → become setuid('licorn:licorn') process.write_pid_file(wpid_path) if opts.daemon: process.use_log_file(wlog_path) pname = '%s/wmi' % dname process.set_name(pname) logging.progress("%s: starting (pid %d)." % (pname, os.getpid())) setup_signals_handler(pname) if opts.wmi_listen_address: # the CLI launch argument has priority over the configuration # directive, for testing purposes. listen_address = opts.wmi_listen_address elif configuration.daemon.wmi.listen_address: listen_address = configuration.daemon.wmi.listen_address else: # the fallback is localhost listen_address = 'localhost' if listen_address.startswith('if:') \ or listen_address.startswith('iface:') \ or listen_address.startswith('interface:'): raise NotImplementedError( 'getting interface address is not yet implemented.') logging.progress('%s: bind on listen address %s.' % ( pname, styles.stylize(styles.ST_ADDRESS, listen_address))) count = 0 while True: # try creating an http server. # if it fails because of socket already in use, just retry # forever, displaying a message every second. # # when creation succeeds, break the loop and serve requets. count += 1 try: httpd = TCPServer((listen_address, wmi_port), WMIHTTPRequestHandler) break except socket.error, e: if e[0] == 98: logging.warning("%s/wmi: socket already in use. waiting (total: %dsec)." % (dname, count)) time.sleep(1) else: logging.error("%s/wmi: socket error %s." % (dname, e)) return httpd.serve_forever() except OSError, e: logging.error("%s/wmi: fork failed: errno %d (%s)." % (dname, e.errno, e.strerror)) except KeyboardInterrupt: logging.warning('%s/wmi: terminating on interrupt signal.' % dname) raise SystemExit | a40ab54b4c27c2712b82d1ed670b65e2e643ac88 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/a40ab54b4c27c2712b82d1ed670b65e2e643ac88/wmi.py |
|
logging.warning("%s/wmi: socket already in use. waiting (total: %dsec)." % (dname, count)) | logging.warning("%s/wmi: socket already in use. waiting (total: %ds)." % (dname, count)) count += 1 | def fork_wmi(opts, start_wmi = True): """ Start the Web Management Interface (fork it). """ # FIXME: implement start_wmi in argparser module. try: if os.fork() == 0: # FIXME: drop_privileges() → become setuid('licorn:licorn') process.write_pid_file(wpid_path) if opts.daemon: process.use_log_file(wlog_path) pname = '%s/wmi' % dname process.set_name(pname) logging.progress("%s: starting (pid %d)." % (pname, os.getpid())) setup_signals_handler(pname) if opts.wmi_listen_address: # the CLI launch argument has priority over the configuration # directive, for testing purposes. listen_address = opts.wmi_listen_address elif configuration.daemon.wmi.listen_address: listen_address = configuration.daemon.wmi.listen_address else: # the fallback is localhost listen_address = 'localhost' if listen_address.startswith('if:') \ or listen_address.startswith('iface:') \ or listen_address.startswith('interface:'): raise NotImplementedError( 'getting interface address is not yet implemented.') logging.progress('%s: bind on listen address %s.' % ( pname, styles.stylize(styles.ST_ADDRESS, listen_address))) count = 0 while True: # try creating an http server. # if it fails because of socket already in use, just retry # forever, displaying a message every second. # # when creation succeeds, break the loop and serve requets. count += 1 try: httpd = TCPServer((listen_address, wmi_port), WMIHTTPRequestHandler) break except socket.error, e: if e[0] == 98: logging.warning("%s/wmi: socket already in use. waiting (total: %dsec)." % (dname, count)) time.sleep(1) else: logging.error("%s/wmi: socket error %s." % (dname, e)) return httpd.serve_forever() except OSError, e: logging.error("%s/wmi: fork failed: errno %d (%s)." % (dname, e.errno, e.strerror)) except KeyboardInterrupt: logging.warning('%s/wmi: terminating on interrupt signal.' % dname) raise SystemExit | a40ab54b4c27c2712b82d1ed670b65e2e643ac88 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/a40ab54b4c27c2712b82d1ed670b65e2e643ac88/wmi.py |
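Aside: the two wmi.py rows above adjust a bind-retry loop; the counter increment moves inside the address-already-in-use branch so the logged total counts only the seconds actually spent waiting. A minimal standalone sketch of the pattern, written in Python 3 syntax with assumed names (the corpus itself is Python 2):

import errno
import socketserver
import time

def bind_with_retry(address, port, handler):
    # Retry forever while the port is occupied, logging elapsed wait time.
    waited = 0
    while True:
        try:
            return socketserver.TCPServer((address, port), handler)
        except OSError as e:
            if e.errno != errno.EADDRINUSE:
                raise                      # any other socket error is fatal
            waited += 1                    # count only seconds spent waiting
            print("socket already in use, waiting (total: %ds)" % waited)
            time.sleep(1)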
def minifind(path, type=None, perms=None, mindepth=0, maxdepth=99, exclude=[], followlinks=False, followmounts=True): """ Mimic the GNU find behaviour in python. returns an iterator. """ if mindepth > maxdepth: raise exceptions.BadArgumentError("mindepth must be <= maxdepth.") if maxdepth > 99: raise exceptions.BadArgumentError( "please don't try to exhaust maxdepth.") assert ltrace('fsapi', '''> minifind(%s, type=%s, mindepth=%s, maxdepth=%s, ''' '''exclude=%s, followlinks=%s, followmounts=%s)''' % ( path, type, mindepth, maxdepth, exclude, followlinks, followmounts)) paths_to_walk = [ path ] next_paths_to_walk = [] current_depth = 0 S_IFSTD = S_IFDIR | S_IFREG while True: if paths_to_walk != []: entry = paths_to_walk.pop(0) elif next_paths_to_walk != []: paths_to_walk = next_paths_to_walk next_paths_to_walk = [] entry = paths_to_walk.pop(0) current_depth += 1 else: break try: entry_stat = os.lstat(entry) entry_type = entry_stat.st_mode & 0170000 entry_mode = entry_stat.st_mode & 07777 except (IOError, OSError), e: if e.errno == 2 or (e.errno == 13 and entry[-5:] == '.gvfs'): continue else: raise e else: if current_depth >= mindepth \ and ( (type is None and entry_type & S_IFSTD) \ or entry_type == type) \ and ( perms is None or (entry_mode & perms) ): #ltrace('fsapi', ' minifind(yield=%s)' % entry) yield entry #print 'type %s %s %s' % (entry_type, S_IFLNK, entry_type & S_IFLNK) if (entry_type == S_IFLNK and not followlinks) \ or (os.path.ismount(entry) and not followmounts): logging.progress('minifind(): skipping link or mountpoint %s.' % stylize(ST_PATH, entry)) continue if entry_type == S_IFDIR and current_depth < maxdepth: try: for x in os.listdir(entry): if x not in exclude: next_paths_to_walk.append("%s/%s" % (entry, x)) else: assert ltrace('fsapi', ' minifind(excluded=%s)' % entry) except (IOError, OSError), e: if e.errno == 2: # happens on recursive delete() applyed on minifind() # results: the dir vanishes during the os.listdir(). continue else: raise e | 35fc5c64dfc0567f1862ec7ffb81a147ef2b8f43 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/35fc5c64dfc0567f1862ec7ffb81a147ef2b8f43/fsapi.py |
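Aside: the fsapi.py row above carries minifind(), a breadth-first reimplementation of a subset of GNU find with mindepth/maxdepth bounds and an exclude list. Roughly the same traversal can be sketched on top of os.walk; every name below is an assumption for illustration, not corpus code:

import os

def walk_bounded(path, mindepth=1, maxdepth=99, exclude=()):
    # Depth is counted in path separators relative to the starting point.
    base = path.rstrip(os.sep).count(os.sep)
    for root, dirs, files in os.walk(path):
        depth = root.count(os.sep) - base + 1   # depth of root's children
        dirs[:] = [d for d in dirs if d not in exclude]   # prune, like exclude=
        if depth >= mindepth:
            for name in dirs + [f for f in files if f not in exclude]:
                yield os.path.join(root, name)
        if depth >= maxdepth:
            dirs[:] = []                        # stop descending, like maxdepth=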
def check_dirs_and_contents_perms_and_acls_new(dirs_infos, batch=None, | def check_dirs_and_contents_perms_and_acls_new(dirs_infos, batch=False, | def check_dirs_and_contents_perms_and_acls_new(dirs_infos, batch=None, auto_answer=None): """ general function to check file/dir """ def check_one_dir_and_acl(dir_info, batch=batch, auto_answer=auto_answer): all_went_ok = True # save desired user and group owner of the file/dir try: if dir_info.user: uid = dir_info['user'] else: uid = -1 if dir_info.group and dir_info.group != '': gid = dir_info['group'] else: gid = -1 except KeyError, e: raise exceptions.LicornRuntimeError('''You just encountered a ''' '''programmer bug. Get in touch with [email protected] (was: ''' '''%s).''' % e) except exceptions.LicornRuntimeException, e: raise exceptions.LicornRuntimeError('''The uid/gid you want to ''' '''check against does not exist on this system ! This ''' '''shouldn't happen and is probably a programmer/packager ''' '''bug. Get in touch with [email protected] (was: %s).''' % e) # Does the file/dir exist ? try: entry_stat = os.lstat(dir_info['path']) except OSError, e: if e.errno == 13: raise exceptions.InsufficientPermissionsError(str(e)) elif e.errno == 2: raise exceptions.DoesntExistsException(str(e)) else: # FIXME: do more things to recover from more system errors… raise e # if it is a file if ( entry_stat.st_mode & 0170000 ) == S_IFREG: logging.progress("Checking file %s…" % stylize(ST_PATH, dir_info['path'])) if dir_info.files_perm and dir_info.user \ and dir_info.group: check_perms( file_type=S_IFREG, dir_info=dir_info, batch=batch) # if it is a dir elif ( entry_stat.st_mode & 0170000 ) == S_IFDIR: logging.progress("Checking dir %s…" % stylize(ST_PATH, dir_info['path'])) # if the directory ends with '/' that mean that we will only # affect the content of the dir. 
# the dir itself will receive default licorn ACL rights (those # defined in the configuration) if dir_info.path[-1] == '/': dir_info_root = dir_info.copy() dir_info_root.root_dir_acl = True dir_info_root.root_dir_perm = "%s,g:%s:rwx,%s" % ( LMC.configuration.acls.acl_base, LMC.configuration.defaults.admin_group, LMC.configuration.acls.acl_mask) dir_info_root.group = "acl" # now that the "root dir" has its special treatment, # prepare dir_info for the rest (its contents) dir_info.path = dir_info.path[:-1] else: dir_info_root = dir_info logging.progress("Checking %s's %s…" % ( stylize(ST_PATH, dir_info['path']), "ACLs" if dir_info.root_dir_acl else "posix perms")) # deal with root dir check_perms( is_root_dir=True, file_type=S_IFDIR, dir_info=dir_info_root, batch=batch) if dir_info.files_perm != None or dir_info.dirs_perm != None: try: exclude_list = dir_info.exclude except AttributeError : exclude_list = [] if dir_info.files_perm != None: logging.progress("Checking %s's contents %s…" % ( stylize(ST_PATH, dir_info['path']), 'ACLs' if dir_info.content_acl else 'posix perms')) if dir_info.dirs_perm != None: dir_path = dir_info['path'] for dir in minifind(dir_path, exclude=exclude_list, mindepth=1, type=S_IFDIR): dir_info.path=dir check_perms( file_type=S_IFDIR, dir_info=dir_info, batch=batch) # deal with files inside root dir for file in minifind(dir_path, exclude=exclude_list, mindepth=1, type=S_IFREG): dir_info.path = file check_perms( file_type=S_IFREG, dir_info=dir_info, batch=batch) else: logging.warning('''The type of %s is not recognised by the ''' '''check_user() function.''' % dir_info['path']) return all_went_ok if dirs_infos != None: # first, check user_home try: check_one_dir_and_acl(dirs_infos._default) except AttributeError: pass # check all specials_dirs for dir_info in dirs_infos: if check_one_dir_and_acl(dir_info) is False: return False else: return True else: raise exceptions.BadArgumentError( "You must pass something through dirs_infos to check!") | 35fc5c64dfc0567f1862ec7ffb81a147ef2b8f43 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/35fc5c64dfc0567f1862ec7ffb81a147ef2b8f43/fsapi.py |
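Aside: the long fsapi.py row above branches on entry_stat.st_mode & 0170000 to tell regular files from directories before checking ACLs or POSIX permissions. The standard stat module expresses the same file-type test more readably; a sketch, assuming only the type check matters here:

import os
import stat

def entry_kind(path):
    # lstat, like the code above, so symlinks are not followed.
    mode = os.lstat(path).st_mode
    if stat.S_ISREG(mode):
        return 'file'
    if stat.S_ISDIR(mode):
        return 'dir'
    return 'other'    # symlink, fifo, socket, device node...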
if daemon.cmdlistener.role == licornd_roles.SERVER: | if LMC.configuration.licornd.role == licornd_roles.SERVER: | def acceptHost(self, daemon, connection): """ Very basic check for the connection. """ client_addr, client_socket = connection.addr | 0f31f110017538e52dfb02852ad84eb582ec192d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/0f31f110017538e52dfb02852ad84eb582ec192d/cmdlistener.py |
logging.warning('''%s: socket already in use. ''' '''waiting (total: %ds).''' % (self.name, count)) | logging.warning('''%s: %s. ''' '''waiting (total: %ds).''' % (self.name, e, count)) | def run(self): assert ltrace('thread', '%s running' % self.name) | 3efdcf45fa2a5f65ea5a4855fd293bcc4c48f257 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/3efdcf45fa2a5f65ea5a4855fd293bcc4c48f257/cmdlistener.py |
self.pyro_daemon.shutdown(True) | def run(self): assert ltrace('thread', '%s running' % self.name) | 3efdcf45fa2a5f65ea5a4855fd293bcc4c48f257 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/3efdcf45fa2a5f65ea5a4855fd293bcc4c48f257/cmdlistener.py |
|
def log_and_exec (command, inverse_test=False, result_code=0, comment="", | def log_and_exec(command, inverse_test=False, result_code=0, comment="", | def log_and_exec (command, inverse_test=False, result_code=0, comment="", verb=verbose): """Display a command, execute it, and exit if soemthing went wrong.""" sys.stderr.write("%s>>> running %s%s%s\n" % (colors[ST_LOG], colors[ST_PATH], command, colors[ST_NO])) output, retcode = execute(command) must_exit = False # # TODO: implement a precise test on a precise exit value. # for example, when you try to add a group with an invalid name, # licorn-add should exit (e.g.) 34. We must test on this precise # value and not on != 0, because if something wrong but *other* than # errno 34 happened, we won't know it if we don't check carefully the # program output. # if inverse_test: if retcode != result_code: must_exit = True else: if retcode != 0: must_exit = True if must_exit: if inverse_test: test = (" %s→ it should have failed with reason: %s%s%s\n" % (colors[ST_PATH], colors[ST_BAD], comment, colors[ST_NO])) else: test = "" sys.stderr.write(" %s→ return code of command: %s%d%s (expected: %d)%s\n%s → log follows:\n" % ( colors[ST_LOG], colors[ST_BAD], retcode, colors[ST_LOG], result_code, colors[ST_NO], test) ) sys.stderr.write(output) sys.stderr.write( "The last command failed to execute, or return something wrong !\n") raise SystemExit(retcode) if verb: sys.stderr.write(output) | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
return (strip_moving_data(output), retcode) | def RunCommand(self, cmdnum, batch=False): | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
|
ScenarioTest(commands, descr="integrated help").Run() | ScenarioTest(commands, descr='''test integrated help of all CLI commands''' ).Run() | def test_integrated_help(): """Test extensively argmarser contents and intergated help.""" commands = [] for program in (GET, ADD, MOD, DEL, CHK): commands.extend([ program + ['-h'], program + ['--help']]) if program == ADD: modes = [ 'user', 'users', 'group', 'profile' ] elif program == MOD: modes = [ 'configuration', 'user', 'group', 'profile' ] elif program == DEL: modes = [ 'user', 'group', 'groups', 'profile' ] elif program == GET: modes = [ 'user', 'users', 'passwd', 'group', 'groups', 'profiles', 'configuration' ] elif program == CHK: modes = [ 'user', 'users', 'group', 'groups', 'profile', 'profiles', 'configuration' ] for mode in modes: if program == GET and mode == 'configuration': commands.append(program + [ mode ]) else: commands.extend([ program + [ mode, '-h' ], program + [ mode, '--help' ] ]) ScenarioTest(commands, descr="integrated help").Run() | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
ScenarioTest(commands, context=context, descr="get tests").Run() | ScenarioTest(commands, context=context, descr='''CLI get tests''').Run() | def test_get(context): """Test GET a lot.""" commands = [] for category in [ 'config_dir', 'main_config_file', 'extendedgroup_data_file' ]: for mode in [ '', '-s', '-b', '--bourne-shell', '-c', '--c-shell', '-p', '--php-code' ]: commands.append(GET + [ 'configuration', category, mode ]) for category in [ 'skels', 'shells', 'backends' ]: commands.append(GET + [ 'config', category ]) commands += [ # users GET + [ "users" ], GET + [ "users", "--xml" ], GET + [ "users", "--long" ], GET + [ "users", "--long", "--xml" ], GET + [ "users", "--all" ], GET + [ "users", "--xml", "--all" ], GET + [ "users", "--all", "--long" ], GET + [ "users", "--xml", "--all", "--long" ], # groups GET + [ "groups" ], GET + [ "groups", "--xml" ], GET + [ "groups", "--long" ], GET + [ "groups", "--long", "--xml" ], GET + [ "groups", "--xml", "--all" ], GET + [ "groups", "--xml", "--all", "--long" ], GET + [ "groups", "--xml", "--guests" ], GET + [ "groups", "--xml", "--guests", "--long" ], GET + [ "groups", "--xml", "--responsibles" ], GET + [ "groups", "--xml", "--responsibles", "--long" ], GET + [ "groups", "--xml", "--privileged" ], GET + [ "groups", "--xml", "--privileged", "--long" ], # Profiles GET + [ "profiles" ], GET + [ "profiles", "--xml" ], ] ScenarioTest(commands, context=context, descr="get tests").Run() | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
def test_regexes(): """ Try funky strings to make regexes fail (they should not).""" # TODO: test regexes directly from defs in licorn.core.... test_message('''starting regexes tests.''') regexes_commands = [] # groups related regexes_commands.extend([ ADD + [ 'group', "--name='_- -_'" ], CHK + [ 'group', "--name='_- -_'" ], ADD + [ 'group', "--name=';-)'" ], ADD + [ 'group', "--name='^_^'" ], ADD + [ 'group', "--name='le copain des groupes'" ], CHK + [ 'group', '-v', "--name='le copain des groupes'" ], ADD + [ 'group', "--name='héhéhé'" ], ADD + [ 'group', "--name='%(\`ls -la /etc/passwd\`)'" ], ADD + [ 'group', "--name='echo print coucou | python | nothing'" ], ADD + [ 'group', "--name='**/*-'" ], CHK + [ 'group', '-v', "--name='**/*-'" ] ]) # users related regexes_commands.extend([ ADD + [ 'user', "--login='_- -_'" ], ADD + [ 'user', "--login=';-)'" ], ADD + [ 'user', "--login='^_^'" ], ADD + [ 'user', "--login='le copain des utilisateurs'" ], ADD + [ 'user', "--login='héhéhé'" ], ADD + [ 'user', "--login='%(\`ls -la /etc/passwd\`)'" ], ADD + [ 'user', "--login='echo print coucou | python'" ], ADD + [ 'user', "--login='**/*-'" ] ]) ScenarioTest(regexes_commands).Run() # TODO: profiles ? test_message('''regexes tests finished.''') | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
def clean_system(): """ Remove all stuff to make the system clean, testsuite-wise.""" test_message('''cleaning system from previous runs.''') for argument in ( ['user', '''toto,tutu,tata,titi,test,utilisager.normal,''' \ '''test.responsibilly,utilicateur.accentue,user_test,''' \ '''grp-acl-user,utest_267,user_test2,user_test3,user_testsys,''' \ '''user_testsys2,user_testsys3,user_test_DEBIAN,usertestdebian''', '--no-archive', '-v' ], ['profile', '''--group=utilisagers,responsibilisateurs,''' '''profil_test''', '--del-users', '--no-archive', '-v' ], ['group', '''test_users_A,test_users_B,groupeA,B-Group_Test,''' \ '''groupe_a_skel,ACL_tests,MOD_tests,SYSTEM-test,SKEL-tests,''' \ '''ARCHIVES-test,group_test,group_testsys,group_test2,''' \ '''group_test3,GRP-ACL-test,gtest_267,group_test4,ce1,ce2,cm2,cp''', '--no-archive', '-v' ], ['privilege', '--name=group_test', '-v' ] ): execute(DEL + argument) for directory in ( configuration.home_backup_dir, configuration.home_archive_dir ): clean_dir_contents(directory) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi']) test_message('''system cleaned from previous testsuite runs.''') def clean_dir_contents(directory): """ Totally empty the contents of a given directory, the licorn way. """ if verbose: test_message('Cleaning directory %s.' % directory) def delete_entry(entry): if verbose: logging.notice('Deleting %s.' % entry) if os.path.isdir(entry): shutil.rmtree(entry) else: os.unlink(entry) for entry in fsapi.minifind(directory, mindepth=1, maxdepth=2, type=stat.S_IFDIR|stat.S_IFREG): delete_entry(entry) if verbose: test_message('Cleaned directory %s.' % directory) def make_backups(mode): """Make backup of important system files before messing them up ;-) """ execute([ 'chk', 'config', '-avvb']) if mode == 'unix': for file in system_files: if os.path.exists('/etc/%s' % file): execute([ 'cp', '-f', '/etc/%s' % file, '/tmp/%s.bak.%s' % (file.replace('/', '_'), bkp_ext)]) elif mode == 'ldap': execute([ 'slapcat', '-l', '/tmp/backup.1.ldif' ]) else: logging.error('backup mode not understood.') test_message('''made backups of system config files.''') def compare_delete_backups(mode): test_message('''comparing backups of system files after tests for side-effects alterations.''') if mode == 'unix': for file in system_files: if os.path.exists('/etc/%s' % file): log_and_exec([ '/usr/bin/colordiff', '/etc/%s' % file, '/tmp/%s.bak.%s' % (file.replace('/', '_'), bkp_ext)], False, comment="should not display any diff (system has been cleaned).", verb = True) execute([ 'rm', '/tmp/%s.bak.%s' % (file.replace('/', '_'), bkp_ext)]) elif mode == 'ldap': execute([ 'slapcat', '-l', '/tmp/backup.2.ldif']) log_and_exec([ '/usr/bin/colordiff', '/tmp/backup.1.ldif', '/tmp/backup.2.ldif'], False, comment="should not display any diff (system has been cleaned).", verb = True) execute([ 'rm', '/tmp/backup.1.ldif', '/tmp/backup.2.ldif']) else: logging.error('backup mode not understood.') test_message('''system config files backup comparison finished successfully.''') | def clean_system(): """ Remove all stuff to make the system clean, testsuite-wise.""" test_message('''cleaning system from previous runs.''') # delete them first in case of a previous failed testsuite run. # don't check exit codes or such, this will be done later. 
for argument in ( ['user', '''toto,tutu,tata,titi,test,utilisager.normal,''' \ '''test.responsibilly,utilicateur.accentue,user_test,''' \ '''grp-acl-user,utest_267,user_test2,user_test3,user_testsys,''' \ '''user_testsys2,user_testsys3,user_test_DEBIAN,usertestdebian''', '--no-archive', '-v' ], ['profile', '''--group=utilisagers,responsibilisateurs,''' '''profil_test''', '--del-users', '--no-archive', '-v' ], ['group', '''test_users_A,test_users_B,groupeA,B-Group_Test,''' \ '''groupe_a_skel,ACL_tests,MOD_tests,SYSTEM-test,SKEL-tests,''' \ '''ARCHIVES-test,group_test,group_testsys,group_test2,''' \ '''group_test3,GRP-ACL-test,gtest_267,group_test4,ce1,ce2,cm2,cp''', '--no-archive', '-v' ], ['privilege', '--name=group_test', '-v' ] ): execute(DEL + argument) for directory in ( configuration.home_backup_dir, configuration.home_archive_dir ): clean_dir_contents(directory) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi']) test_message('''system cleaned from previous testsuite runs.''') | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
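Aside: the core.py rows above replace raw 'sudo rm -rf %s/*' invocations with a clean_dir_contents() helper that walks each backup/archive directory and deletes its entries. A short standard-library sketch of the same idea; the function name mirrors the corpus but this body is an assumption, not the original implementation:

import os
import shutil

def clean_dir_contents(directory):
    # Empty the directory without removing the directory itself.
    for name in os.listdir(directory):
        entry = os.path.join(directory, name)
        if os.path.isdir(entry) and not os.path.islink(entry):
            shutil.rmtree(entry)     # real sub-directories, recursively
        else:
            os.unlink(entry)         # files and symlinks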
|
descr='ADD group with specified skel and descr', | descr='''ADD group with specified skel and descr''', | def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ] | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
descr='''Check if privilege list is up to date after group deletion''' ''' (fix | descr='''Check if privilege list is up to date after group deletion ''' '''(fix | def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ] | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
descr='tests of groups commands with --gid option (fix | descr='''tests of groups commands with --gid option (fix | def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ] | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
def chk_acls_cmds(group, subdir=None): return [ 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names.plural, group, '/%s' % subdir if subdir else '') ] | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
descr='''check if a user can be modified with an incorrect shell and with a correct shell''' | descr='''check if a user can be modified with an incorrect shell and ''' '''with a correct shell''' | def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ] | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
descr='test add user with --firstname and --lastname options (fix | descr='''test add user with --firstname and --lastname options ''' '''(fix | def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ] | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
descr='modify one or more parameters of a user (avoid | descr='''modify one or more parameters of a user (avoid | def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ] | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
descr='check option --home of user command (fix | descr='''check option --home of user command (fix | def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ] | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |
descr='''check messages of --lock and --unlock on mod user command and answer of get user --long (avoid | descr='''check messages of --lock and --unlock on mod user command ''' '''and answer of get user --long (avoid | def chk_acls_cmds(dir): return [ 'getfacl', '-R', dir ] | 38a496a61795ec7fb8b2f6589579ac75d0b90d9d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7650/38a496a61795ec7fb8b2f6589579ac75d0b90d9d/core.py |