text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Launches a new HTTP client session on the server taken from the `self.options` dict. :param my_ip: IP of this Client itself """
def start(self):
    """Run one HTTP bait session against the configured honeypot: fetch the
    index page with basic auth, then follow random links until the request
    budget is exhausted."""
    opts = self.options
    username, password = opts['username'], opts['password']
    server_host, server_port = opts['server'], opts['port']
    honeypot_id = opts['honeypot_id']

    session = self.create_session(server_host, server_port, honeypot_id)
    self.sessions[session.id] = session
    logger.debug('Sending {0} bait session to {1}:{2}. (bait id: {3})'.format(
        'http', server_host, server_port, session.id))
    try:
        url = self._make_url(server_host, '/index.html', server_port)
        response = self.client.get(url, auth=HTTPBasicAuth(username, password), verify=False)
        session.did_connect = True
        # a 200 on the index page is treated as a successful login
        if response.status_code == 200:
            session.add_auth_attempt('plaintext', True, username=username, password=password)
            session.did_login = True
        else:
            session.add_auth_attempt('plaintext', False, username=username, password=password)
        links = self._get_links(response)
        # crawl random links until the budget is spent or no links remain
        while self.sent_requests <= self.max_requests and links:
            url = random.choice(links)
            response = self.client.get(url, auth=HTTPBasicAuth(username, password), verify=False)
            links = self._get_links(response)
        session.did_complete = True
    except Exception as err:
        logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err))))
    finally:
        session.alldone = True
        session.end_session()
        self.client.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_links(self, response): """ Parses the response text and returns all the links in it. :param response: The Response object. """
def _get_links(self, response):
    """Parse the response body and return all anchor targets found in it.

    :param response: the Response object.
    :return: list of href values (may contain None for anchors without href).
    """
    html_text = response.text.encode('utf-8')
    doc = document_fromstring(html_text)
    # Fix: the original built the list but never returned it, so callers
    # always received None and the crawl loop in start() never ran.
    return [anchor.get('href') for anchor in doc.cssselect('a')]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bootstrap(server_workdir, drone_workdir): """Bootstraps localhost configurations for a Beeswarm server and a honeypot. :param server_workdir: Output directory for the server configuration file. :param drone_workdir: Output directory for the drone configuration file. """
def bootstrap(server_workdir, drone_workdir):
    """Bootstraps localhost configurations for a Beeswarm server and a honeypot.

    :param server_workdir: Output directory for the server configuration file.
    :param drone_workdir: Output directory for the drone configuration file.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)-15s (%(name)s) %(message)s')
    console_log = logging.StreamHandler()
    console_log.setLevel(logging.INFO)
    console_log.setFormatter(formatter)
    root_logger.addHandler(console_log)

    server_workdir_absolute = os.path.abspath(server_workdir)
    old_cwd = os.getcwd()
    os.chdir(server_workdir)
    server = Server(server_workdir_absolute, None, start_webui=False, customize=False,
                    reset_password=False, max_sessions=0, server_hostname='127.0.0.1')
    logger.info('Server config has been written to {0}'.format(
        os.path.join(server_workdir, 'beeswarmcfg.json')))
    gevent.spawn(server.start, False)
    # waiting game to ensure actors has started.
    gevent.sleep(2)
    os.chdir(old_cwd)

    # setting up socket to communicate with ZMQ actor.
    context = beeswarm.shared.zmq_context
    database_actor = context.socket(zmq.REQ)
    database_actor.connect(SocketNames.DATABASE_REQUESTS.value)

    db_session = database_setup.get_session()
    drone = Honeypot()

    protocol_config = (
        ('ftp', 21, {
            'max_attempts': 3,
            'banner': 'Microsoft FTP Server',
            'syst_type': 'Windows-NT'
        }),
        ('telnet', 23, {
            'max_attempts': 3
        }),
        ('pop3', 110, {
            'max_attempts': 3
        }),
        ('pop3s', 993, {
            'max_attempts': 3
        }),
        ('ssh', 22, {}),
        ('http', 80, {
            'banner': 'Microsoft-IIS/5.0'
        }),
        ('https', 443, {
            'banner': 'Microsoft-IIS/5.0'
        }),
        ('smtp', 25, {
            'banner': 'Microsoft ESMTP MAIL service ready'
        }),
        ('vnc', 5900, {})
    )

    for protocol, port, protocol_specific_data in protocol_config:
        drone.add_capability(protocol, port, protocol_specific_data)

    drone.cert_common_name = '*'
    drone.cert_country = 'US'
    drone.cert_state = 'None'
    drone.cert_locality = 'None'
    drone.cert_organization = 'None'
    drone.cert_organization_unit = ''

    db_session.add(drone)
    db_session.commit()

    drone_config = send_zmq_request_socket(
        database_actor, '{0} {1}'.format(Messages.DRONE_CONFIG.value, drone.id))

    with open(os.path.join(drone_workdir, 'beeswarmcfg.json'), 'w') as drone_config_file:
        drone_config_file.write(json.dumps(drone_config, indent=4))
    # Fix: report the path the drone config was actually written to
    # (drone_workdir); the original logged the server workdir here.
    logger.info('Drone config has been written to {0}'.format(
        os.path.join(drone_workdir, 'beeswarmcfg.json')))
    server.stop()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def database_exists(url): """Check if a database exists. :param url: A SQLAlchemy engine URL. Performs backend-specific testing to quickly determine if a database exists on the server. :: database_exists('postgres://postgres@localhost/name') #=> False create_database('postgres://postgres@localhost/name') database_exists('postgres://postgres@localhost/name') #=> True Supports checking against a constructed URL as well. :: engine = create_engine('postgres://postgres@localhost/name') database_exists(engine.url) #=> False create_database(engine.url) database_exists(engine.url) #=> True """
def database_exists(url):
    """Check if a database exists.

    :param url: A SQLAlchemy engine URL.

    Performs backend-specific testing to quickly determine if a database
    exists on the server. Supports plain URL strings as well as constructed
    ``engine.url`` objects.
    """
    url = copy(make_url(url))
    database = url.database
    # connect to a maintenance database so an engine can be created even
    # when the target database does not exist yet
    if url.drivername.startswith('postgresql'):
        url.database = 'template1'
    else:
        url.database = None

    engine = sa.create_engine(url)
    dialect = engine.dialect.name

    if dialect == 'postgresql':
        query = "SELECT 1 FROM pg_database WHERE datname='%s'" % database
        return bool(engine.execute(query).scalar())
    if dialect == 'mysql':
        query = ("SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA "
                 "WHERE SCHEMA_NAME = '%s'" % database)
        return bool(engine.execute(query).scalar())
    if dialect == 'sqlite':
        # an sqlite "database" is a file (or the in-memory pseudo-database)
        return database == ':memory:' or os.path.exists(database)

    # unknown backend: probe by attempting a trivial query on the target db
    text = 'SELECT 1'
    try:
        url.database = database
        engine = sa.create_engine(url)
        engine.execute(text)
        return True
    except (ProgrammingError, OperationalError):
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def message_proxy(self, work_dir): """ drone_data_inboud is for data comming from drones drone_data_outbound is for commands to the drones, topic must either be a drone ID or all for sending a broadcast message to all drones """
def message_proxy(self, work_dir):
    """Relay traffic between drones and internal subscribers.

    drone_data_inbound carries data coming from drones;
    drone_data_outbound carries commands to the drones - the topic must be a
    drone ID, or 'all' to broadcast to every drone.
    """
    public_keys_dir = os.path.join(work_dir, 'certificates', 'public_keys')
    secret_keys_dir = os.path.join(work_dir, 'certificates', 'private_keys')

    # start and configure the CurveZMQ auth worker
    auth = IOLoopAuthenticator()
    auth.start()
    auth.allow('127.0.0.1')
    auth.configure_curve(domain='*', location=public_keys_dir)

    # external interfaces for communicating with drones
    server_secret_file = os.path.join(secret_keys_dir, 'beeswarm_server.pri')
    server_public, server_secret = load_certificate(server_secret_file)

    drone_data_inbound = beeswarm.shared.zmq_context.socket(zmq.PULL)
    drone_data_inbound.curve_secretkey = server_secret
    drone_data_inbound.curve_publickey = server_public
    drone_data_inbound.curve_server = True
    drone_data_inbound.bind('tcp://*:{0}'.format(self.config['network']['zmq_port']))

    drone_data_outbound = beeswarm.shared.zmq_context.socket(zmq.PUB)
    drone_data_outbound.curve_secretkey = server_secret
    drone_data_outbound.curve_publickey = server_public
    drone_data_outbound.curve_server = True
    drone_data_outbound.bind('tcp://*:{0}'.format(self.config['network']['zmq_command_port']))

    # internal interfaces:
    # all inbound session data from drones is replayed on this socket
    drone_data_socket = beeswarm.shared.zmq_context.socket(zmq.PUB)
    drone_data_socket.bind(SocketNames.DRONE_DATA.value)
    # all commands received here are published on the external interface
    drone_command_socket = beeswarm.shared.zmq_context.socket(zmq.PULL)
    drone_command_socket.bind(SocketNames.DRONE_COMMANDS.value)

    poller = zmq.Poller()
    poller.register(drone_data_inbound, zmq.POLLIN)
    poller.register(drone_command_socket, zmq.POLLIN)
    while True:
        # .recv() gives no context switch - why not? using poller with timeout instead
        socks = dict(poller.poll(100))
        gevent.sleep()
        if drone_command_socket in socks and socks[drone_command_socket] == zmq.POLLIN:
            data = drone_command_socket.recv()
            drone_id, _ = data.split(' ', 1)
            logger.debug("Sending drone command to: {0}".format(drone_id))
            # pub socket takes care of filtering
            drone_data_outbound.send(data)
        elif drone_data_inbound in socks and socks[drone_data_inbound] == zmq.POLLIN:
            raw_msg = drone_data_inbound.recv()
            split_data = raw_msg.split(' ', 2)
            if len(split_data) == 3:
                topic, drone_id, data = split_data
            else:
                data = None
                topic, drone_id, = split_data
            logger.debug("Received {0} message from {1}.".format(topic, drone_id))
            # relay message on internal socket
            drone_data_socket.send(raw_msg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Starts the BeeSwarm server. """
def start(self):
    """Start the Beeswarm server and block until all greenlets exit."""
    self.started = True
    if self.app:
        web_port = self.config['network']['web_port']
        logger.info('Starting server listening on port {0}'.format(web_port))
        key_file = os.path.join(self.work_dir, 'server.key')
        cert_file = os.path.join(self.work_dir, 'server.crt')
        http_server = WSGIServer(('', web_port), self.app,
                                 keyfile=key_file, certfile=cert_file)
        self.greenlets.append(gevent.spawn(http_server.serve_forever))
    stop_if_not_write_workdir(self.work_dir)
    logger.info('Server started.')
    gevent.joinall(self.greenlets)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def time_in_range(self): """Return true if current time is in the active range"""
def time_in_range(self):
    """Return True if the current wall-clock time is inside the active window."""
    now = datetime.datetime.now().time()
    if self.start_time <= self.end_time:
        # window lies within a single day
        return self.start_time <= now <= self.end_time
    # window wraps past midnight
    return self.start_time <= now or now <= self.end_time
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Launches a new VNC client session on the server taken from the `self.options` dict. :param my_ip: IP of this Client itself """
def start(self):
    """Run one VNC bait session: RFB handshake followed by DES challenge auth."""
    password = self.options['password']
    server_host = self.options['server']
    server_port = self.options['port']
    honeypot_id = self.options['honeypot_id']

    session = self.create_session(server_host, server_port, honeypot_id)
    self.sessions[session.id] = session
    logger.debug('Sending {0} bait session to {1}:{2}. (bait id: {3})'.format(
        'vnc', server_host, server_port, session.id))

    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client_socket.connect((server_host, int(server_port)))
        session.source_port = client_socket.getsockname()[1]
    except socket.error as e:
        logger.debug('Caught exception: {0} ({1})'.format(e, str(type(e))))
    else:
        session.did_connect = True
        protocol_version = client_socket.recv(1024)
        client_socket.send(RFB_VERSION)
        supported_auth_methods = client_socket.recv(1024)
        # \x02 implies that VNC authentication method is to be used
        # Refer to http://tools.ietf.org/html/rfc6143#section-7.1.2 for more info.
        if '\x02' in supported_auth_methods:
            client_socket.send(VNC_AUTH)
            challenge = client_socket.recv(1024)
            # VNC passwords are limited to 8 chars; pad/truncate accordingly
            aligned_password = (password + '\0' * 8)[:8]
            des = RFBDes(aligned_password)
            response = des.encrypt(challenge)
            client_socket.send(response)
            auth_status = client_socket.recv(1024)
            if auth_status == AUTH_SUCCESSFUL:
                session.add_auth_attempt('des_challenge', True, password=aligned_password)
                session.did_login = True
            else:
                session.add_auth_attempt('des_challenge', False, password=aligned_password)
                session.did_login = False
        session.did_complete = True
    finally:
        session.alldone = True
        session.end_session()
        if client_socket:
            client_socket.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_session(self, server_host, server_port, honeypot_id): """ Creates a new session. :param server_host: IP address of the server :param server_port: Server port :return: A new `BaitSession` object. """
def create_session(self, server_host, server_port, honeypot_id):
    """Create, register and return a new BaitSession for this client protocol.

    :param server_host: IP address of the server
    :param server_port: Server port
    :param honeypot_id: identifier of the targeted honeypot
    :return: A new `BaitSession` object.
    """
    # the protocol name is derived from the concrete subclass name
    protocol_name = self.__class__.__name__.lower()
    new_session = BaitSession(protocol_name, server_host, server_port, honeypot_id)
    self.sessions[new_session.id] = new_session
    return new_session
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Launches a new FTP client session on the server taken from the `self.options` dict. :param my_ip: IP of this Client itself """
def start(self):
    """Run one FTP bait session: log in, then issue a handful of random commands."""
    username = self.options['username']
    password = self.options['password']
    server_host = self.options['server']
    server_port = self.options['port']
    honeypot_id = self.options['honeypot_id']
    command_limit = random.randint(6, 11)

    session = self.create_session(server_host, server_port, honeypot_id)
    self.sessions[session.id] = session
    logger.debug('Sending {0} bait session to {1}:{2}. (bait id: {3})'.format(
        'ftp', server_host, server_port, session.id))
    self.file_list = []
    try:
        self.connect()
        session.did_connect = True
        # TODO: Catch login failure
        self.login(username, password)
        session.add_auth_attempt('plaintext', True, username=username, password=password)
        session.did_login = True
        session.timestamp = datetime.utcnow()
    except ftplib.error_perm as err:
        logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err))))
    except socket.error as err:
        logger.debug('Error while communicating: {0} ({1})'.format(err, str(type(err))))
    else:
        command_count = 0
        while command_count <= command_limit:
            command_count += 1
            try:
                self.sense()
                cmd, param = self.decide()
                self.act(cmd, param)
                gevent.sleep(random.uniform(0, 3))
            except IndexError:
                # hit an empty folder, or a folder with only files - try again
                continue
        session.did_complete = True
    finally:
        if self.client.sock is not None:
            # quit() also closes the underlying socket
            self.client.quit()
        session.alldone = True
        session.end_session()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sense(self): """ Launches a few "sensing" commands such as 'ls', or 'pwd' and updates the current bait state. """
def sense(self):
    """Pick a random "sensing" command (e.g. ls or pwd), record it as the
    last command, then execute it to refresh the bait state."""
    chosen = random.choice(self.senses)
    action = getattr(self, chosen)
    self.state['last_command'] = chosen
    action()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decide(self): """ Decides the next command to be launched based on the current state. :return: Tuple containing the next command name, and it's parameters. """
def decide(self):
    """Choose the next command based on the previously executed one.

    :return: tuple of (command name, parameter string)
    """
    candidates = self.COMMAND_MAP[self.state['last_command']]
    next_cmd = random.choice(candidates)
    if next_cmd == 'retrieve':
        arg = random.choice(self.state['file_list'])
    elif next_cmd == 'cwd':
        arg = random.choice(self.state['dir_list'])
    else:
        arg = ''
    return next_cmd, arg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def act(self, cmd_name, param): """ Run the command with the parameters. :param cmd_name: The name of command to run :param param: Params for the command """
def act(self, cmd_name, param):
    """Execute the named bait command, passing the parameter only when given.

    :param cmd_name: The name of command to run
    :param param: Params for the command
    """
    handler = getattr(self, cmd_name)
    if param:
        handler(param)
    else:
        handler()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list(self): """ Run the FTP LIST command, and update the state. """
def list(self):
    """Issue the FTP LIST command and rebuild the cached file/dir listings."""
    logger.debug('Sending FTP list command.')
    # reset both caches before _process_list repopulates them line by line
    self.state['file_list'], self.state['dir_list'] = [], []
    self.client.retrlines('LIST', self._process_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def retrieve(self, filename): """ Run the FTP RETR command, and download the file :param filename: Name of the file to download """
def retrieve(self, filename):
    """Download *filename* via the FTP RETR command.

    :param filename: Name of the file to download
    """
    logger.debug('Sending FTP retr command. Filename: {}'.format(filename))
    self.client.retrbinary('RETR {}'.format(filename), self._save_file)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cwd(self, newdir): """ Send the FTP CWD command :param newdir: Directory to change to """
def cwd(self, newdir):
    """Send the FTP CWD command and record the resulting working directory.

    :param newdir: Directory to change to
    """
    # Fix: "Workding" typo in the log message.
    logger.debug('Sending FTP cwd command. New Working Directory: {}'.format(newdir))
    self.client.cwd(newdir)
    self.state['current_dir'] = self.client.pwd()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _process_list(self, list_line): # -rw-r--r-- 1 ftp ftp 68 May 09 19:37 testftp.txt """ Processes a line of 'ls -l' output, and updates state accordingly. :param list_line: Line to process """
res = list_line.split(' ', 8) if res[0].startswith('-'): self.state['file_list'].append(res[-1]) if res[0].startswith('d'): self.state['dir_list'].append(res[-1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Starts sending client bait to the configured Honeypot. """
def start(self):
    """Start a BaitDispatcher for every bait enabled in the configuration,
    then block until all dispatchers finish."""
    logger.info('Starting client.')
    self.dispatcher_greenlets = []
    for _, entry in self.config['baits'].items():
        for bait_class in clientbase.ClientBase.__subclasses__():
            bait_name = bait_class.__name__.lower()
            # a bait is considered enabled iff it has an entry in the config
            if bait_name in entry:
                bait_options = entry[bait_name]
                dispatcher = BaitDispatcher(bait_class, bait_options)
                dispatcher.start()
                self.dispatcher_greenlets.append(dispatcher)
                logger.info('Adding {0} bait'.format(bait_name))
                logger.debug('Bait added with options: {0}'.format(bait_options))
    gevent.joinall(self.dispatcher_greenlets)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Launches a new POP3 client session on the server taken from the `self.options` dict. :param my_ip: IP of this Client itself """
def start(self):
    """Run one POP3S bait session: log in, then fetch and delete every message."""
    username = self.options['username']
    password = self.options['password']
    server_host = self.options['server']
    server_port = self.options['port']
    honeypot_id = self.options['honeypot_id']

    session = self.create_session(server_host, server_port, honeypot_id)
    try:
        logger.debug('Sending {0} bait session to {1}:{2}. (bait id: {3})'.format(
            'pop3', server_host, server_port, session.id))
        conn = poplib.POP3_SSL(server_host, server_port)
        session.source_port = conn.sock.getsockname()[1]
        banner = conn.getwelcome()
        session.protocol_data['banner'] = banner
        session.did_connect = True
        conn.user(username)
        conn.pass_(password)
        # TODO: Handle failed login
        session.add_auth_attempt('plaintext', True, username=username, password=password)
        session.did_login = True
        session.timestamp = datetime.utcnow()
    except Exception as err:
        logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err))))
    else:
        list_entries = conn.list()[1]
        for entry in list_entries:
            index, _ = entry.split(' ')
            conn.retr(index)
            conn.dele(index)
        logger.debug('Found and deleted {0} messages on {1}'.format(len(list_entries), server_host))
        conn.quit()
        session.did_complete = True
    finally:
        session.alldone = True
        session.end_session()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_matching_session(self, session, db_session, timediff=5): """ Tries to match a session with it's counterpart. For bait session it will try to match it with honeypot sessions and the other way around. :param session: session object which will be used as base for query. :param timediff: +/- allowed time difference between a session and a potential matching session. """
def get_matching_session(self, session, db_session, timediff=5):
    """Try to match a session with its counterpart: bait sessions are matched
    against honeypot sessions and vice versa.

    :param session: session object used as the base for the query.
    :param db_session: active database session.
    :param timediff: +/- allowed time difference between a session and a
                     potential matching session.
    :return: the matching session, or None if no match was found.
    """
    min_datetime = session.timestamp - timedelta(seconds=timediff)
    max_datetime = session.timestamp + timedelta(seconds=timediff)
    match = None  # default return value
    classification = db_session.query(Classification).filter(
        Classification.type == 'pending').one()
    # get all sessions that match basic properties
    sessions = db_session.query(Session).options(joinedload(Session.authentication)) \
        .filter(Session.protocol == session.protocol) \
        .filter(Session.honeypot == session.honeypot) \
        .filter(Session.timestamp >= min_datetime) \
        .filter(Session.timestamp <= max_datetime) \
        .filter(Session.id != session.id) \
        .filter(Session.classification == classification)
    # identify the correct session by comparing authentication attempts;
    # this could probably also be done with a fancier ORM/SQL construct
    for potential_match in sessions:
        if potential_match.discriminator == session.discriminator:
            continue
        assert potential_match.id != session.id
        for honey_auth in session.authentication:
            for session_auth in potential_match.authentication:
                if session_auth.username == honey_auth.username and \
                        session_auth.password == honey_auth.password and \
                        session_auth.successful == honey_auth.successful:
                    assert potential_match.id != session.id
                    match = potential_match
                    break
    return match
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _classify_malicious_sessions(self): """ Will classify all unclassified sessions as malicious activity. :param delay_seconds: no sessions newer than (now - delay_seconds) will be processed. """
def _classify_malicious_sessions(self):
    """Classify all pending sessions older than the processing delay.

    Completed bait sessions that were never matched are marked as MITM;
    honeypot sessions are classified as credentials_reuse, probe or
    bruteforce, and published on the processed-sessions socket.
    """
    min_datetime = datetime.utcnow() - timedelta(seconds=self.delay_seconds)
    db_session = database_setup.get_session()

    # find and process bait sessions that did not get classified during persistence
    bait_sessions = db_session.query(BaitSession).options(joinedload(BaitSession.authentication)) \
        .filter(BaitSession.classification_id == 'pending') \
        .filter(BaitSession.did_complete == True) \
        .filter(BaitSession.received < min_datetime).all()
    for bait_session in bait_sessions:
        logger.debug('Classifying bait session with id {0} as MITM'.format(bait_session.id))
        bait_session.classification = db_session.query(
            Classification).filter(Classification.type == 'mitm').one()
    db_session.commit()

    # find and process honeypot sessions that did not get classified during persistence
    sessions = db_session.query(Session, Drone.name).filter(Session.discriminator == None) \
        .filter(Session.timestamp <= min_datetime) \
        .filter(Session.classification_id == 'pending') \
        .all()
    for entry in sessions:
        session = entry[0]
        # check whether the attack used credentials leaked by beeswarm drones
        bait_match = None
        for a in session.authentication:
            bait_match = db_session.query(BaitSession) \
                .filter(BaitSession.authentication.any(username=a.username,
                                                       password=a.password)).first()
            if bait_match:
                break
        if bait_match:
            logger.debug('Classifying session with id {0} as attack which involved the reuse '
                         'of previously transmitted credentials.'.format(session.id))
            session.classification = db_session.query(Classification).filter(
                Classification.type == 'credentials_reuse').one()
        elif len(session.authentication) == 0:
            logger.debug('Classifying session with id {0} as probe.'.format(session.id))
            session.classification = db_session.query(
                Classification).filter(Classification.type == 'probe').one()
        else:
            # we have never transmitted this username/password combo
            logger.debug('Classifying session with id {0} as bruteforce attempt.'.format(session.id))
            session.classification = db_session.query(Classification).filter(
                Classification.type == 'bruteforce').one()
        db_session.commit()
        session.name = entry[1]
        self.processedSessionsPublisher.send(
            '{0} {1}'.format(Messages.SESSION.value, json.dumps(session.to_dict())))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Launches a new SSH client session on the server taken from the `self.options` dict. :param my_ip: IP of this Client itself """
def start(self):
    """Run one SSH bait session: log in, then issue a series of random shell
    commands before logging out."""
    username = self.options['username']
    password = self.options['password']
    server_host = self.options['server']
    server_port = self.options['port']
    honeypot_id = self.options['honeypot_id']

    session = self.create_session(server_host, server_port, honeypot_id)
    self.sessions[session.id] = session
    logger.debug('Sending ssh bait session to {0}:{1}. (bait id: {2})'.format(
        server_host, server_port, session.id))
    try:
        self.connect_login()
        session.did_connect = True
        # TODO: Handle failed login
        session.add_auth_attempt('plaintext', True, username=username, password=password)
        session.did_login = True
    except (SSHException, AuthenticationFailed) as err:
        logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err))))
    else:
        command_count = 0
        command_limit = random.randint(6, 11)
        while command_count < command_limit:
            command_count += 1
            self.sense()
            comm, param = self.decide()
            self.act(comm, param)
            gevent.sleep(random.uniform(0.4, 5.6))
        self.logout()
        session.did_complete = True
    finally:
        session.alldone = True
        session.end_session()
        self.comm_chan.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_command(self, cmd): """ Send a command to the remote SSH server. :param cmd: The command to send """
def send_command(self, cmd):
    """Transmit *cmd* to the remote SSH server, terminated with a newline.

    :param cmd: The command to send
    """
    logger.debug('Sending {0} command.'.format(cmd))
    self.comm_chan.sendall(cmd + '\n')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect_login(self): """ Try to login to the Remote SSH Server. :return: Response text on successful login :raise: `AuthenticationFailed` on unsuccessful login """
def connect_login(self):
    """Log in to the remote SSH server and open an interactive shell.

    :return: Response text on successful login
    :raise: `AuthenticationFailed` when the shell prompt never appears
    """
    self.client.connect(self.options['server'], self.options['port'],
                        self.options['username'], self.options['password'])
    self.comm_chan = self.client.invoke_shell()
    # let the server take some time to get ready
    time.sleep(1)
    while not self.comm_chan.recv_ready():
        time.sleep(0.5)
    login_response = self.comm_chan.recv(2048)
    if not login_response.endswith('$ '):
        raise AuthenticationFailed
    return login_response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list2dict(list_of_options): """Transforms a list of 2 element tuples to a dictionary"""
def list2dict(list_of_options):
    """Transform a list of 2-element tuples into a dictionary.

    :param list_of_options: iterable of (key, value) pairs
    :return: dict built from the pairs
    """
    # the manual accumulation loop is exactly what the dict constructor does
    return dict(list_of_options)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def path_to_ls(fn): """ Converts an absolute path to an entry resembling the output of the ls command on most UNIX systems."""
def path_to_ls(fn):
    """Convert an absolute path to an entry resembling the output of the
    ls command on most UNIX systems.

    :param fn: path to stat
    :return: a single formatted directory-listing line
    """
    st = os.stat(fn)
    full_mode = 'rwxrwxrwx'
    mode = ''
    # Incrementally builds up the 9 character string, using characters from the
    # fullmode (defined above) and mode bits from the stat() system call.
    for i in range(9):
        mode += ((st.st_mode >> (8 - i)) & 1) and full_mode[i] or '-'
    # hoisted out of the loop: these are loop-invariant and were recomputed 9x
    d = (os.path.isdir(fn)) and 'd' or '-'
    file_time = time.strftime(' %b %d %H:%M ', time.gmtime(st.st_mtime))
    return '{0}{1} 1 ftp ftp {2}\t{3}{4}'.format(
        d, mode, str(st.st_size), file_time, os.path.basename(fn))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _start_drone(self): """ Restarts the drone """
def _start_drone(self):
    """(Re)start the drone in the mode named by its configuration file."""
    with open(self.config_file, 'r') as config_file:
        self.config = json.load(config_file, object_hook=asciify)

    mode_name = self.config['general']['mode']
    mode = None
    if mode_name == '' or mode_name is None:
        logger.info('Drone has not been configured, awaiting configuration from Beeswarm server.')
    elif mode_name == 'honeypot':
        mode = Honeypot
    elif mode_name == 'client':
        mode = Client

    if mode:
        self.drone = mode(self.work_dir, self.config)
        self.drone_greenlet = gevent.spawn(self.drone.start)
        self.drone_greenlet.link_exception(self.on_exception)
        logger.info('Drone configured and running. ({0})'.format(self.id))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Launches a new SMTP client session on the server taken from the `self.options` dict. :param my_ip: IP of this Client itself """
def start(self):
    """Run one SMTP bait session: log in, then replay mails from the archive."""
    username = self.options['username']
    password = self.options['password']
    server_host = self.options['server']
    server_port = self.options['port']
    honeypot_id = self.options['honeypot_id']

    session = self.create_session(server_host, server_port, honeypot_id)
    logger.debug('Sending {0} bait session to {1}:{2}. (bait id: {3})'.format(
        'smtp', server_host, server_port, session.id))
    try:
        self.connect()
        session.did_connect = True
        session.source_port = self.client.sock.getsockname()[1]
        self.login(username, password)
        # TODO: Handle failed login
        # TODO: password='' is a silly fix, this needs to be fixed server side...
        session.add_auth_attempt('plaintext', True, username=username, password='')
        session.did_login = True
    except smtplib.SMTPException as error:
        logger.debug('Caught exception: {0} ({1})'.format(error, str(type(error))))
    else:
        while self.sent_mails <= self.max_mails:
            from_addr, to_addr, mail_body = self.get_one_mail()
            try:
                if from_addr and to_addr and isinstance(mail_body, str):
                    self.client.sendmail(from_addr, to_addr, mail_body)
                else:
                    continue
            except TypeError as e:
                logger.debug('Malformed email in mbox archive, skipping.')
                continue
            else:
                self.sent_mails += 1
                logger.debug('Sent mail from ({0}) to ({1})'.format(from_addr, to_addr))
            time.sleep(1)
        self.client.quit()
        session.did_complete = True
    finally:
        logger.debug('SMTP Session complete.')
        session.alldone = True
        session.end_session()
        self.client.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_one_mail(self): """ Choose and return a random email from the mail archive. :return: Tuple containing From Address, To Address and the mail body. """
def get_one_mail(self):
    """Pick a random message from the mail archive.

    :return: Tuple of (from address, to address, mail body).
    """
    while True:
        key = random.choice(self.mailbox.keys())
        message = self.mailbox[key]
        sender = message.get_from()
        recipient = message['To']
        body = message.get_payload()
        # skip malformed entries that lack either address
        if not sender or not recipient:
            continue
        return sender, recipient, body
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect(self): """ Connect to the SMTP server. """
def connect(self):
    """Open the SMTP connection to the configured server."""
    # TODO: local_hostname should be configurable
    self.client = smtplib.SMTP(self.options['server'], self.options['port'],
                               local_hostname='local.domain', timeout=15)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _asciify_list(data): """ Ascii-fies list values """
def _asciify_list(data):
    """Return a copy of *data* with unicode items accent-stripped and
    utf-8 encoded; nested lists and dicts are converted recursively."""
    converted = []
    for element in data:
        if isinstance(element, unicode):
            element = _remove_accents(element)
            element = element.encode('utf-8')
        elif isinstance(element, list):
            element = _asciify_list(element)
        elif isinstance(element, dict):
            element = _asciify_dict(element)
        converted.append(element)
    return converted
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _asciify_dict(data): """ Ascii-fies dict keys and values """
def _asciify_dict(data):
    """Return a copy of *data* with unicode keys and values accent-stripped
    and utf-8 encoded; nested lists and dicts are converted recursively."""
    converted = {}
    for key, value in data.iteritems():
        if isinstance(key, unicode):
            key = _remove_accents(key)
            key = key.encode('utf-8')
        if isinstance(value, unicode):
            value = _remove_accents(value)
            value = value.encode('utf-8')
        elif isinstance(value, list):
            value = _asciify_list(value)
        elif isinstance(value, dict):
            value = _asciify_dict(value)
        converted[key] = value
    return converted
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_human(self, buffer_): """ Emulates human typing speed """
def write_human(self, buffer_):
    """ Emulates human typing speed """
    # Telnet IAC bytes must be escaped by doubling before transmission.
    if self.IAC in buffer_:
        buffer_ = buffer_.replace(self.IAC, self.IAC + self.IAC)
    self.msg("send %r", buffer_)
    for char in buffer_:
        # Gaussian delay around 80 ms between keystrokes.  Bug fix: clamp
        # at zero, because gauss() can return negative samples and
        # time.sleep() rejects negative values.
        delta = max(0.0, random.gauss(80, 20))
        self.sock.sendall(char)
        time.sleep(delta / 1000.0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Launches a new Telnet client session on the server taken from the `self.options` dict. :param my_ip: IP of this Client itself """
def start(self):
    """ Launches a new Telnet client session on the server taken from the
    `self.options` dict.

    :param my_ip: IP of this Client itself
    """
    login = self.options['username']
    password = self.options['password']
    server_host = self.options['server']
    server_port = self.options['port']
    honeypot_id = self.options['honeypot_id']
    # Issue a small, random number of shell commands before logging out.
    command_limit = random.randint(6, 11)
    session = self.create_session(server_host, server_port, honeypot_id)
    self.sessions[session.id] = session
    logger.debug(
        'Sending telnet bait session to {0}:{1}. (bait id: {2})'.format(server_host, server_port, session.id))
    try:
        self.connect()
        self.login(login, password)
        session.add_auth_attempt('plaintext', True, username=login, password=password)
        session.did_connect = True
        session.source_port = self.client.sock.getsockname()[1]
        session.did_login = True
    except InvalidLogin:
        logger.debug('Telnet session could not login. ({0})'.format(session.id))
        session.did_login = False
    except Exception as err:
        # Bug fix: `exc_info=True` was previously passed to str.format(),
        # which silently ignores unused keyword arguments; it belongs to
        # logger.debug() so the traceback is actually logged.
        logger.debug('Caught exception: {0} {1}'.format(err, str(err)), exc_info=True)
    else:
        command_count = 0
        while command_count < command_limit:
            command_count += 1
            # Observe state, pick the next command, then execute it.
            self.sense()
            comm, param = self.decide()
            self.act(comm, param)
            # Pause like a human operator between commands.
            gevent.sleep(random.uniform(0.4, 5.6))
        self.act('logout')
        session.did_complete = True
    finally:
        session.alldone = True
        session.end_session()
        if self.client:
            self.client.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect(self): """ Open a new telnet session on the remote server. """
def connect(self):
    """ Open a new telnet session on the remote server. """
    host = self.options['server']
    port = self.options['port']
    self.client = BaitTelnetClient(host, port)
    # Handle telnet option negotiation with our own callback.
    self.client.set_option_negotiation_callback(self.process_options)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def login(self, login, password): """ Login to the remote telnet server. :param login: Username to use for logging in :param password: Password to use for logging in :raise: `InvalidLogin` on failed login """
def login(self, login, password):
    """ Login to the remote telnet server.

    :param login: Username to use for logging in
    :param password: Password to use for logging in
    :raise: `InvalidLogin` on failed login
    """
    client = self.client
    client.read_until('Username: ')
    client.write(login + '\r\n')
    client.read_until('Password: ')
    client.write(password + '\r\n')
    # A shell prompt within 10 seconds means the credentials were accepted.
    prompt = client.read_until('$ ', 10)
    if not prompt.endswith('$ '):
        raise InvalidLogin
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def logout(self): """ Logout from the remote server. """
def logout(self):
    """ Logout from the remote server. """
    client = self.client
    client.write('exit\r\n')
    # Drain whatever the server sends before tearing down the connection.
    client.read_all()
    client.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sense(self): """ Launch a command in the 'senses' List, and update the current state."""
def sense(self):
    """ Launch a command in the 'senses' List, and update the current state."""
    name = random.choice(self.senses)
    arg = ''
    if name == 'ls':
        # Half of the time, ask for the long listing.
        if random.randint(0, 1):
            arg = '-l'
    elif name == 'uname':
        # Build a random contiguous slice of the known uname flags.
        flags = 'asnrvmpio'
        begin = random.randint(0, len(flags) - 2)
        finish = random.randint(begin + 1, len(flags) - 1)
        arg = '-{}'.format(flags[begin:finish])
    # Dispatch to the method of the same name.
    getattr(self, name)(arg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decide(self): """ Choose the next command to execute, and its parameters, based on the current state. """
# Pick the next shell command from the Markov-style COMMAND_MAP keyed by the
# previously executed command, and synthesise a plausible argument for it.
next_command_name = random.choice(self.COMMAND_MAP[self.state['last_command']])
param = ''
if next_command_name == 'cd':
    try:
        param = random.choice(self.state['dir_list'])
    except IndexError:
        # No directories discovered yet; fall back to listing.
        next_command_name = 'ls'
elif next_command_name == 'uname':
    # Random contiguous slice of the known uname flags.
    opts = 'asnrvmpio'
    start = random.randint(0, len(opts) - 2)
    end = random.randint(start + 1, len(opts) - 1)
    param = '-{}'.format(opts[start:end])
elif next_command_name == 'ls':
    if random.randint(0, 1):
        param = '-l'
elif next_command_name == 'cat':
    try:
        param = random.choice(self.state['file_list'])
    except IndexError:
        # NOTE: Python 2 (`string.lowercase`); invent a bogus 3-letter name.
        param = ''.join(random.choice(string.lowercase) for x in range(3))
elif next_command_name == 'echo':
    param = random.choice([
        '$http_proxy',
        '$https_proxy',
        '$ftp_proxy',
        '$BROWSER',
        '$EDITOR',
        '$SHELL',
        '$PAGER'
    ])
elif next_command_name == 'sudo':
    param = random.choice([
        'pm-hibernate',
        'shutdown -h',
        'vim /etc/httpd.conf',
        'vim /etc/resolve.conf',
        'service network restart',
        '/etc/init.d/network-manager restart',
    ])
return next_command_name, param
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def act(self, cmd_name, params=None): """ Run the specified command with its parameters."""
def act(self, cmd_name, params=None):
    """ Run the specified command with its parameters."""
    handler = getattr(self, cmd_name)
    # Only pass arguments through when some were supplied.
    if params:
        handler(params)
    else:
        handler()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mutate_json_record(self, json_record): """Override it to convert fields of `json_record` to needed types. Default implementation converts `datetime` to string in ISO8601 format. """
def mutate_json_record(self, json_record):
    """Override it to convert fields of `json_record` to needed types.

    Default implementation converts `datetime` to string in ISO8601 format.
    """
    # Mutates the mapping in place and returns it for convenience.
    for field_name in json_record:
        value = json_record[field_name]
        if isinstance(value, datetime):
            json_record[field_name] = value.isoformat()
    return json_record
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Listen to the stream and send events to the client. """
# Open a dedicated SSH channel and pump `gerrit stream-events` lines into
# the client's event queue until stopped or the remote side closes.
channel = self._ssh_client.get_transport().open_session()
self._channel = channel
channel.exec_command("gerrit stream-events")
stdout = channel.makefile()
stderr = channel.makefile_stderr()
while not self._stop.is_set():
    try:
        if channel.exit_status_ready():
            # Remote command finished: report stderr if any, otherwise a
            # generic connection-closed message, then stop the loop.
            if channel.recv_stderr_ready():
                error = stderr.readline().strip()
            else:
                error = "Remote server connection closed"
            self._error_event(error)
            self._stop.set()
        else:
            # NOTE(review): readline() blocks until the next event line --
            # stopping may therefore wait for one more event.
            data = stdout.readline()
            self._gerrit.put_event(data)
    except Exception as e:  # pylint: disable=W0703
        # Convert any failure into an error event and terminate.
        self._error_event(repr(e))
        self._stop.set()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query(self, term): """ Run a query. :arg str term: The query term to run. :Returns: A list of results as :class:`pygerrit.models.Change` objects. :Raises: `ValueError` if `term` is not a string. """
def query(self, term):
    """ Run a query.

    :arg str term: The query term to run.

    :Returns: A list of results as :class:`pygerrit.models.Change` objects.

    :Raises: `ValueError` if `term` is not a string.
    """
    results = []
    command = ["query", "--current-patch-set", "--all-approvals",
               "--format JSON", "--commit-message"]
    if not isinstance(term, basestring):
        raise ValueError("term must be a string")
    command.append(escape_string(term))
    result = self._ssh_client.run_gerrit_command(" ".join(command))
    decoder = JSONDecoder()
    for line in result.stdout.read().splitlines():
        # Gerrit's response to the query command contains one or more
        # lines of JSON-encoded strings.  The last one is a status
        # dictionary containing the key "type" whose value indicates
        # whether or not the operation was successful.
        # According to http://goo.gl/h13HD it should be safe to use the
        # presence of the "type" key to determine whether the dictionary
        # represents a change or if it's the query status indicator.
        try:
            data = decoder.decode(line)
        except ValueError as err:
            # Bug fix: the message was never interpolated -- the original
            # passed `err` as a second constructor argument instead of
            # %-formatting it (compare the raise two lines below).
            raise GerritError("Query returned invalid data: %s" % err)
        if "type" in data and data["type"] == "error":
            raise GerritError("Query error: %s" % data["message"])
        elif "project" in data:
            results.append(Change(data))
    return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_event_stream(self): """ Start streaming events from `gerrit stream-events`. """
def start_event_stream(self):
    """ Start streaming events from `gerrit stream-events`. """
    if self._stream:
        # Already streaming; nothing to do.
        return
    self._stream = GerritStream(self, ssh_client=self._ssh_client)
    self._stream.start()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_event_stream(self): """ Stop streaming events from `gerrit stream-events`."""
def stop_event_stream(self):
    """ Stop streaming events from `gerrit stream-events`."""
    if not self._stream:
        return
    self._stream.stop()
    self._stream.join()
    self._stream = None
    # Drop any events that were queued but never consumed.
    with self._events.mutex:
        self._events.queue.clear()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_event(self, block=True, timeout=None): """ Get the next event from the queue. :arg boolean block: Set to True to block if no event is available. :arg seconds timeout: Timeout to wait if no event is available. :Returns: The next event as a :class:`pygerrit.events.GerritEvent` instance, or `None` if: - `block` is False and there is no event available in the queue, or - `block` is True and no event is available within the time specified by `timeout`. """
def get_event(self, block=True, timeout=None):
    """ Get the next event from the queue.

    :arg boolean block: Set to True to block if no event is available.
    :arg seconds timeout: Timeout to wait if no event is available.

    :Returns: The next event as a :class:`pygerrit.events.GerritEvent`
        instance, or `None` if:
         - `block` is False and there is no event available in the queue, or
         - `block` is True and no event is available within the time
           specified by `timeout`.
    """
    try:
        event = self._events.get(block, timeout)
    except Empty:
        # No event arrived within the allowed wait.
        return None
    return event
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put_event(self, data): """ Create event from `data` and add it to the queue. :arg json data: The JSON data from which to create the event. :Raises: :class:`pygerrit.error.GerritError` if the queue is full, or the factory could not create the event. """
def put_event(self, data):
    """ Create event from `data` and add it to the queue.

    :arg json data: The JSON data from which to create the event.

    :Raises: :class:`pygerrit.error.GerritError` if the queue is full, or
        the factory could not create the event.
    """
    try:
        new_event = self._factory.create(data)
        self._events.put(new_event)
    except Full:
        raise GerritError("Unable to add event: queue is full")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _extract_version(version_string, pattern): """ Extract the version from `version_string` using `pattern`. Return the version as a string, with leading/trailing whitespace stripped. """
if version_string: match = pattern.match(version_string.strip()) if match: return match.group(1) return ""
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _configure(self): """ Configure the ssh parameters from the config file. """
# Load connection parameters (host, port, user, optional identity file and
# proxy command) for `self.hostname` from the user's ~/.ssh/config.
configfile = expanduser("~/.ssh/config")
if not isfile(configfile):
    raise GerritError("ssh config file '%s' does not exist" %
                      configfile)
config = SSHConfig()
# NOTE(review): the file handle from open() is never closed explicitly.
config.parse(open(configfile))
data = config.lookup(self.hostname)
if not data:
    raise GerritError("No ssh config for host %s" % self.hostname)
if 'hostname' not in data or 'port' not in data or 'user' not in data:
    raise GerritError("Missing configuration data in %s" % configfile)
self.hostname = data['hostname']
self.username = data['user']
if 'identityfile' in data:
    # paramiko returns identityfile as a list; use the first entry.
    key_filename = abspath(expanduser(data['identityfile'][0]))
    if not isfile(key_filename):
        raise GerritError("Identity file '%s' does not exist" %
                          key_filename)
    self.key_filename = key_filename
try:
    self.port = int(data['port'])
except ValueError:
    raise GerritError("Invalid port: %s" % data['port'])
if 'proxycommand' in data:
    self.proxy = ProxyCommand(data['proxycommand'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _do_connect(self): """ Connect to the remote. """
# Establish the SSH connection, loading ssh-config values lazily when
# username/port were not supplied explicitly.
self.load_system_host_keys()
if self.username is None or self.port is None:
    self._configure()
try:
    self.connect(hostname=self.hostname,
                 port=self.port,
                 username=self.username,
                 key_filename=self.key_filename,
                 sock=self.proxy)
except socket.error as e:
    raise GerritError("Failed to connect to server: %s" % e)
try:
    # Gerrit advertises its version in the SSH server banner, e.g.
    # "...GerritCodeReview_2.9 ..."; cache it if present.
    version_string = self._transport.remote_version
    pattern = re.compile(r'^.*GerritCodeReview_([a-z0-9-\.]*) .*$')
    self.remote_version = _extract_version(version_string, pattern)
except AttributeError:
    self.remote_version = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _connect(self): """ Connect to the remote if not already connected. """
if not self.connected.is_set(): try: self.lock.acquire() # Another thread may have connected while we were # waiting to acquire the lock if not self.connected.is_set(): self._do_connect() if self.keepalive: self._transport.set_keepalive(self.keepalive) self.connected.set() except GerritError: raise finally: self.lock.release()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_remote_version(self): """ Return the version of the remote Gerrit server. """
def get_remote_version(self):
    """ Return the version of the remote Gerrit server. """
    if self.remote_version is None:
        # Lazily query the server once and cache the answer.
        output = self.run_gerrit_command("version").stdout.read()
        self.remote_version = _extract_version(
            output, re.compile(r'^gerrit version (.*)$'))
    return self.remote_version
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register(cls, name): """ Decorator to register the event identified by `name`. Return the decorated class. Raise GerritError if the event is already registered. """
def register(cls, name):
    """ Decorator to register the event identified by `name`.

    Return the decorated class.

    Raise GerritError if the event is already registered.
    """
    def _wrap(event_class):
        """ Register `event_class` under `name` and return it. """
        if name in cls._events:
            raise GerritError("Duplicate event: %s" % name)
        cls._events[name] = [event_class.__module__, event_class.__name__]
        event_class.name = name
        return event_class
    return _wrap
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(cls, data): """ Create a new event instance. Return an instance of the `GerritEvent` subclass after converting `data` to json. Raise GerritError if json parsed from `data` does not contain a `type` key. """
# Parse `data` as JSON (falling back to a synthetic error event when it is
# malformed), then dynamically import and instantiate the registered event
# class for its "type".
try:
    json_data = json.loads(data)
except ValueError as err:
    logging.debug("Failed to load json data: %s: [%s]", str(err), data)
    json_data = json.loads(ErrorEvent.error_json(err))
if "type" not in json_data:
    raise GerritError("`type` not in json_data")
name = json_data["type"]
if name not in cls._events:
    # Unknown event types fall back to the generic handler registration.
    name = 'unhandled-event'
event = cls._events[name]
module_name = event[0]
class_name = event[1]
# Import the module that registered the event and look up its class.
module = __import__(module_name, fromlist=[module_name])
klazz = getattr(module, class_name)
return klazz(json_data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put(self, endpoint, **kwargs): """ Send HTTP PUT to the endpoint. :arg str endpoint: The endpoint to send to. :returns: JSON decoded result. :raises: requests.RequestException on timeout or connection error. """
# Merge the session-level request arguments over the per-call ones.
# NOTE(review): the update() direction means self.kwargs silently overrides
# any duplicate keys the caller passed -- confirm this precedence is
# intended.
kwargs.update(self.kwargs.copy())
if "data" in kwargs:
    # assumes self.kwargs always supplies a "headers" dict -- TODO confirm
    kwargs["headers"].update(
        {"Content-Type": "application/json;charset=UTF-8"})
response = requests.put(self.make_url(endpoint), **kwargs)
return _decode_response(response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, endpoint, **kwargs): """ Send HTTP DELETE to the endpoint. :arg str endpoint: The endpoint to send to. :returns: JSON decoded result. :raises: requests.RequestException on timeout or connection error. """
def delete(self, endpoint, **kwargs):
    """ Send HTTP DELETE to the endpoint.

    :arg str endpoint: The endpoint to send to.

    :returns:
        JSON decoded result.

    :raises:
        requests.RequestException on timeout or connection error.
    """
    # Session-level arguments take precedence over per-call ones.
    request_args = dict(kwargs)
    request_args.update(self.kwargs)
    response = requests.delete(self.make_url(endpoint), **request_args)
    return _decode_response(response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def review(self, change_id, revision, review): """ Submit a review. :arg str change_id: The change ID. :arg str revision: The revision. :arg str review: The review details as a :class:`GerritReview`. :returns: JSON decoded result. :raises: requests.RequestException on timeout or connection error. """
def review(self, change_id, revision, review):
    """ Submit a review.

    :arg str change_id: The change ID.
    :arg str revision: The revision.
    :arg str review: The review details as a :class:`GerritReview`.

    :returns: JSON decoded result.

    :raises: requests.RequestException on timeout or connection error.
    """
    endpoint = "changes/%s/revisions/%s/review" % (change_id, revision)
    # Bug fix: propagate the decoded REST response to the caller, as the
    # docstring promises, instead of discarding it.
    return self.post(endpoint, data=str(review))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_comments(self, comments): """ Add inline comments. :arg dict comments: Comments to add. Usage:: add_comments([{'filename': 'Makefile', 'line': 10, 'message': 'inline message'}]) add_comments([{'filename': 'Makefile', 'range': {'start_line': 0, 'start_character': 1, 'end_line': 0, 'end_character': 5}, 'message': 'inline message'}]) """
def add_comments(self, comments):
    """ Add inline comments.

    :arg dict comments: Comments to add.

    Usage::

        add_comments([{'filename': 'Makefile',
                       'line': 10,
                       'message': 'inline message'}])

        add_comments([{'filename': 'Makefile',
                       'range': {'start_line': 0,
                                 'start_character': 1,
                                 'end_line': 0,
                                 'end_character': 5},
                       'message': 'inline message'}])
    """
    for comment in comments:
        # Bug fix: the original test `'filename' and 'message' in
        # comment.keys()` only checked for 'message' (the string literal
        # 'filename' is always truthy), so a comment lacking 'filename'
        # raised KeyError below.  Require both keys explicitly.
        if 'filename' in comment and 'message' in comment:
            msg = {}
            # A 'range' comment takes precedence over a 'line' comment;
            # entries with neither are skipped.
            if 'range' in comment:
                msg = {"range": comment['range'],
                       "message": comment['message']}
            elif 'line' in comment:
                msg = {"line": comment['line'],
                       "message": comment['message']}
            else:
                continue
            file_comment = {comment['filename']: [msg]}
            if self.comments:
                if comment['filename'] in self.comments:
                    # Append to the existing list for this file.
                    self.comments[comment['filename']].append(msg)
                else:
                    self.comments.update(file_comment)
            else:
                self.comments.update(file_comment)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _register_server(self, server, timeout=30):
    '''Register a new SiriDB Server.

    This method is used by the SiriDB manage tool and should not be used
    otherwise. Full access rights are required for this request.
    '''
    # Blocks the calling thread until the protocol request completes or
    # the timeout (seconds) expires.
    result = self._loop.run_until_complete(
        self._protocol.send_package(CPROTO_REQ_REGISTER_SERVER,
                                    data=server,
                                    timeout=timeout))
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_file(self, fn, timeout=30):
    '''Request a SiriDB configuration file.

    This method is used by the SiriDB manage tool and should not be used
    otherwise. Full access rights are required for this request.
    '''
    # Translate the file name into its protocol request type.
    request_type = FILE_MAP.get(fn, None)
    if request_type is None:
        raise FileNotFoundError('Cannot get file {!r}. Available file '
                                'requests are: {}'
                                .format(fn, ', '.join(FILE_MAP.keys())))
    return self._loop.run_until_complete(
        self._protocol.send_package(request_type, timeout=timeout))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _bits_to_float(bits, lower=-90.0, middle=0.0, upper=90.0): """Convert GeoHash bits to a float."""
for i in bits: if i: lower = middle else: upper = middle middle = (upper + lower) / 2 return middle
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _float_to_bits(value, lower=-90.0, middle=0.0, upper=90.0, length=15): """Convert a float to a list of GeoHash bits."""
ret = [] for i in range(length): if value >= middle: lower = middle ret.append(1) else: upper = middle ret.append(0) middle = (upper + lower) / 2 return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _geohash_to_bits(value): """Convert a GeoHash to a list of GeoHash bits."""
def _geohash_to_bits(value):
    """Convert a GeoHash to a list of GeoHash bits."""
    bits = []
    for symbol in value:
        # Each base-32 symbol encodes five bits, most significant first.
        code = BASE32MAP.get(symbol)
        chunk = []
        for _ in range(5):
            chunk.append(code & 0b1)
            code = code >> 1
        bits.extend(reversed(chunk))
    return bits
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _bits_to_geohash(value): """Convert a list of GeoHash bits to a GeoHash."""
ret = [] # Get 5 bits at a time for i in (value[i:i+5] for i in xrange(0, len(value), 5)): # Convert binary to integer # Note: reverse here, the slice above doesn't work quite right in reverse. total = sum([(bit*2**count) for count,bit in enumerate(i[::-1])]) ret.append(BASE32MAPR[total]) # Join the string and return return "".join(ret)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def adjacent(geohash, direction): """Return the adjacent geohash for a given direction."""
# Based on an MIT licensed implementation by Chris Veness from: # http://www.movable-type.co.uk/scripts/geohash.html assert direction in 'nsew', "Invalid direction: %s"%direction assert geohash, "Invalid geohash: %s"%geohash neighbor = { 'n': [ 'p0r21436x8zb9dcf5h7kjnmqesgutwvy', 'bc01fg45238967deuvhjyznpkmstqrwx' ], 's': [ '14365h7k9dcfesgujnmqp0r2twvyx8zb', '238967debc01fg45kmstqrwxuvhjyznp' ], 'e': [ 'bc01fg45238967deuvhjyznpkmstqrwx', 'p0r21436x8zb9dcf5h7kjnmqesgutwvy' ], 'w': [ '238967debc01fg45kmstqrwxuvhjyznp', '14365h7k9dcfesgujnmqp0r2twvyx8zb' ] } border = { 'n': [ 'prxz', 'bcfguvyz' ], 's': [ '028b', '0145hjnp' ], 'e': [ 'bcfguvyz', 'prxz' ], 'w': [ '0145hjnp', '028b' ] } last = geohash[-1] parent = geohash[0:-1] t = len(geohash) % 2 # Check for edge cases if (last in border[direction][t]) and (parent): parent = adjacent(parent, direction) return parent + BASESEQUENCE[neighbor[direction][t].index(last)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def neighbors(geohash): """Return all neighboring geohashes."""
def neighbors(geohash):
    """Return all neighboring geohashes."""
    # Hoist the four cardinal lookups so each is computed only once
    # (`adjacent` is pure, so results are identical); diagonals are then
    # derived from the cardinal neighbours.
    north = adjacent(geohash, 'n')
    south = adjacent(geohash, 's')
    east = adjacent(geohash, 'e')
    west = adjacent(geohash, 'w')
    return {
        'n': north,
        'ne': adjacent(north, 'e'),
        'e': east,
        'se': adjacent(south, 'e'),
        's': south,
        'sw': adjacent(south, 'w'),
        'w': west,
        'nw': adjacent(north, 'w'),
        'c': geohash,
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def run_id(self):
    '''Run name without whitespace
    '''
    # CamelCase class name -> snake_case identifier (two-pass regex).
    step_one = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', step_one).lower()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _init(self, run_conf, run_number=None):
    '''Initialization before a new run.
    '''
    # Clear the stop/abort flags so the new run starts in a clean state.
    self.stop_run.clear()
    self.abort_run.clear()
    self._run_status = run_status.running
    # Persist the run number before applying the run configuration.
    self._write_run_number(run_number)
    self._init_run_conf(run_conf)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def connect_cancel(self, functions):
    '''Run given functions when a run is cancelled.
    '''
    # NOTE: Python 2 code (`basestring`).  Entries may be either names of
    # methods on this object or arbitrary callables.
    self._cancel_functions = []
    for func in functions:
        if isinstance(func, basestring) and hasattr(self, func) and callable(getattr(self, func)):
            # A string names a method on this instance.
            self._cancel_functions.append(getattr(self, func))
        elif callable(func):
            self._cancel_functions.append(func)
        else:
            raise ValueError("Unknown function %s" % str(func))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def handle_cancel(self, **kwargs):
    '''Cancelling a run.
    '''
    # Call every registered cancel callback with only the keyword
    # arguments its signature actually accepts.
    for func in self._cancel_functions:
        f_args = getargspec(func)[0]
        f_kwargs = {key: kwargs[key] for key in f_args if key in kwargs}
        func(**f_kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def run_run(self, run, conf=None, run_conf=None, use_thread=False, catch_exception=True):
    '''Runs a run in another thread. Non-blocking.

    Parameters
    ----------
    run : class, object
        Run class or object.
    run_conf : str, dict, file
        Specific configuration for the run.
    use_thread : bool
        If True, run run in thread and returns blocking function.

    Returns
    -------
    If use_thread is True, returns function, which blocks until thread
    terminates, and which itself returns run status.
    If use_thread is False, returns run status.
    '''
    # Merge the general configuration first.
    if isinstance(conf, basestring) and os.path.isfile(conf):
        logging.info('Updating configuration from file %s', os.path.abspath(conf))
    elif conf is not None:
        logging.info('Updating configuration')
    conf = self.open_conf(conf)
    self._conf.update(conf)
    if isclass(run):
        # instantiate the class
        run = run(conf=self._conf)
    local_run_conf = {}
    # general parameters from conf
    if 'run_conf' in self._conf:
        logging.info('Updating run configuration using run_conf key from configuration')
        local_run_conf.update(self._conf['run_conf'])
    # check for class name, scan specific parameters from conf
    if run.__class__.__name__ in self._conf:
        logging.info('Updating run configuration using %s key from configuration' % (run.__class__.__name__,))
        local_run_conf.update(self._conf[run.__class__.__name__])
    if isinstance(run_conf, basestring) and os.path.isfile(run_conf):
        logging.info('Updating run configuration from file %s', os.path.abspath(run_conf))
    elif run_conf is not None:
        logging.info('Updating run configuration')
    run_conf = self.open_conf(run_conf)
    # check for class name, scan specific parameters from conf
    if run.__class__.__name__ in run_conf:
        run_conf = run_conf[run.__class__.__name__]
    # run_conf parameter has highest priority, updated last
    local_run_conf.update(run_conf)
    if use_thread:
        self.current_run = run

        # Wrap the run in a thread; calling the returned thunk blocks
        # until the thread terminates and yields the run status.
        @thunkify(thread_name='RunThread', daemon=True, default_func=self.current_run.get_run_status)
        def run_run_in_thread():
            return run.run(run_conf=local_run_conf)

        # Install Ctrl-C handling so a run in a thread can be stopped.
        signal.signal(signal.SIGINT, self._signal_handler)
        logging.info('Press Ctrl-C to stop run')
        return run_run_in_thread()
    else:
        self.current_run = run
        status = run.run(run_conf=local_run_conf)
        if not catch_exception and status != run_status.finished:
            raise RuntimeError('Exception occurred. Please read the log.')
        return status
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def run_primlist(self, primlist, skip_remaining=False):
    '''Runs runs from a primlist.

    Parameters
    ----------
    primlist : string
        Filename of primlist.
    skip_remaining : bool
        If True, skip remaining runs, if a run does not exit with status FINISHED.

    Note
    ----
    Primlist is a text file of the following format (comment line by adding '#'):
    <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value>
    '''
    runlist = self.open_primlist(primlist)
    for index, run in enumerate(runlist):
        logging.info('Progressing with run %i out of %i...', index + 1, len(runlist))
        # Run each entry in its own thread; join() blocks until it ends.
        join = self.run_run(run, use_thread=True)
        status = join()
        if skip_remaining and not status == run_status.finished:
            logging.error('Exited run %i with status %s: Skipping all remaining runs.', run.run_number, status)
            break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def select_hits_from_cluster_info(input_file_hits, output_file_hits, cluster_size_condition, n_cluster_condition, chunk_size=4000000):
    ''' Takes a hit table and stores only selected hits into a new table. The selection is done on an event base and events are selected if they have a certain number of cluster or cluster size.
    To increase the analysis speed an event index for the input hit file is created first. Since a cluster hit table can be created, this way of hit selection is no longer needed.

    Parameters
    ----------
    input_file_hits: str
        the input file name with hits
    output_file_hits: str
        the output file name for the hits
    cluster_size_condition: str
        the cluster size condition to select events (e.g.: 'cluster_size_condition <= 2')
    n_cluster_condition: str
        the number of cluster in a event (e.g.: 'n_cluster_condition == 1')
    '''
    logging.info('Write hits of events from ' + str(input_file_hits) + ' with ' + cluster_size_condition + ' and ' + n_cluster_condition + ' into ' + str(output_file_hits))
    with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5:
        # Build event-number indices once so chunked, event-aligned reads
        # below are fast.
        analysis_utils.index_event_number(in_hit_file_h5.root.Hits)
        analysis_utils.index_event_number(in_hit_file_h5.root.Cluster)
        with tb.open_file(output_file_hits, mode="w") as out_hit_file_h5:
            hit_table_out = out_hit_file_h5.create_table(out_hit_file_h5.root, name='Hits', description=data_struct.HitInfoTable, title='hit_data', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            cluster_table = in_hit_file_h5.root.Cluster
            last_word_number = 0
            progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_table.shape[0], term_width=80)
            progress_bar.start()
            # Process the cluster table chunk-wise, keeping event boundaries
            # intact within each chunk.
            for data, index in analysis_utils.data_aligned_at_events(cluster_table, chunk_size=chunk_size):
                selected_events_1 = analysis_utils.get_events_with_cluster_size(event_number=data['event_number'], cluster_size=data['size'], condition=cluster_size_condition)  # select the events with clusters of a certain size
                selected_events_2 = analysis_utils.get_events_with_n_cluster(event_number=data['event_number'], condition=n_cluster_condition)  # select the events with a certain cluster number
                selected_events = analysis_utils.get_events_in_both_arrays(selected_events_1, selected_events_2)  # select events with both conditions above
                logging.debug('Selected ' + str(len(selected_events)) + ' events with ' + n_cluster_condition + ' and ' + cluster_size_condition)
                last_word_number = analysis_utils.write_hits_in_events(hit_table_in=in_hit_file_h5.root.Hits, hit_table_out=hit_table_out, events=selected_events, start_hit_word=last_word_number)  # write the hits of the selected events into a new table
                progress_bar.update(index)
            progress_bar.finish()
            # Preserve the meta data alongside the selected hits.
            in_hit_file_h5.root.meta_data.copy(out_hit_file_h5.root)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def histogram_cluster_table(analyzed_data_file, output_file, chunk_size=10000000):
    '''Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array.
    The 3rd dimension of the occupancy array is the number of different scan parameters used

    Parameters
    ----------
    analyzed_data_file : string
        HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step.
    output_file : string
        HDF5 filename of the output file; the occupancy histogram is written to its HistOcc node.
    chunk_size : int
        Number of cluster table rows read into RAM per iteration.

    Returns
    -------
    None. The occupancy array with dimensions (col, row, #scan_parameter) is stored in the HistOcc node of output_file.
    '''
    with tb.open_file(analyzed_data_file, mode="r") as in_file_h5:
        with tb.open_file(output_file, mode="w") as out_file_h5:
            histogram = PyDataHistograming()
            histogram.create_occupancy_hist(True)
            scan_parameters = None
            event_number_indices = None
            scan_parameter_indices = None
            try:
                meta_data = in_file_h5.root.meta_data[:]
                scan_parameters = analysis_utils.get_unique_scan_parameter_combinations(meta_data)
                if scan_parameters is not None:
                    scan_parameter_indices = np.array(range(0, len(scan_parameters)), dtype='u4')
                    # the histogrammer needs the event numbers where each scan parameter step starts
                    event_number_indices = np.ascontiguousarray(scan_parameters['event_number']).astype(np.uint64)
                    histogram.add_meta_event_index(event_number_indices, array_length=len(scan_parameters['event_number']))
                    histogram.add_scan_parameter(scan_parameter_indices)
                    logging.info("Add %d different scan parameter(s) for analysis", len(scan_parameters))
                else:
                    logging.info("No scan parameter data provided")
                    histogram.set_no_scan_parameter()
            except tb.exceptions.NoSuchNodeError:
                logging.info("No meta data provided, use no scan parameter")
                histogram.set_no_scan_parameter()

            logging.info('Histogram cluster seeds...')
            progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80)
            progress_bar.start()
            total_cluster = 0  # to check analysis
            # iterate the cluster table in event-aligned chunks and fill the occupancy with the seed pixel of each cluster
            for cluster, index in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, chunk_size=chunk_size):
                total_cluster += len(cluster)
                histogram.add_cluster_seed_hits(cluster, len(cluster))
                progress_bar.update(index)
            progress_bar.finish()

            filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)  # compression of the written data
            occupancy_array = histogram.get_occupancy().T
            occupancy_array_table = out_file_h5.create_carray(out_file_h5.root, name='HistOcc', title='Occupancy Histogram', atom=tb.Atom.from_dtype(occupancy_array.dtype), shape=occupancy_array.shape, filters=filter_table)
            occupancy_array_table[:] = occupancy_array

            # cross-check: every cluster that was counted must appear exactly once in the occupancy
            if total_cluster != np.sum(occupancy_array):
                logging.warning('Analysis shows inconsistent number of cluster used. Check needed!')
            in_file_h5.root.meta_data.copy(out_file_h5.root)  # keep the meta data available in the output file
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def analyze_hits_per_scan_parameter(analyze_data, scan_parameters=None, chunk_size=50000):
    '''Takes the hit table and analyzes the hits per scan parameter

    Parameters
    ----------
    analyze_data : analysis.analyze_raw_data.AnalyzeRawData object with an opened hit file (AnalyzeRawData.out_file_h5) or a
        file name with the hit data given (AnalyzeRawData._analyzed_data_file)
    scan_parameters : list of strings:
        The names of the scan parameters to use
    chunk_size : int:
        The chunk size of one hit table read. The bigger the faster. Too big causes memory errors.

    Returns
    -------
    yields the analysis.analyze_raw_data.AnalyzeRawData for each scan parameter
    '''
    # reuse an already opened output file if possible, otherwise open the analyzed data file ourselves
    if analyze_data.out_file_h5 is None or analyze_data.out_file_h5.isopen == 0:
        in_hit_file_h5 = tb.open_file(analyze_data._analyzed_data_file, 'r+')
        close_file = True
    else:
        in_hit_file_h5 = analyze_data.out_file_h5
        close_file = False

    meta_data = in_hit_file_h5.root.meta_data[:]  # get the meta data table
    try:
        hit_table = in_hit_file_h5.root.Hits  # get the hit table
    except tb.NoSuchNodeError:
        logging.error('analyze_hits_per_scan_parameter needs a hit table, but no hit table found.')
        return

    meta_data_table_at_scan_parameter = analysis_utils.get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameters)
    parameter_values = analysis_utils.get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameters)
    event_number_ranges = analysis_utils.get_ranges_from_array(meta_data_table_at_scan_parameter['event_number'])  # get the event number ranges for the different scan parameter settings
    analysis_utils.index_event_number(hit_table)  # create a event_numer index to select the hits by their event number fast, no needed but important for speed up

    # variables for read speed up
    index = 0  # index where to start the read out of the hit table, 0 at the beginning, increased during looping
    best_chunk_size = chunk_size  # number of hits to copy to RAM during looping, the optimal chunk size is determined during looping

    # loop over the selected events; one iteration per scan parameter step
    for parameter_index, (start_event_number, stop_event_number) in enumerate(event_number_ranges):
        logging.info('Analyze hits for ' + str(scan_parameters) + ' = ' + str(parameter_values[parameter_index]))
        analyze_data.reset()  # resets the front end data of the last analysis step but not the options
        readout_hit_len = 0  # variable to calculate a optimal chunk size value from the number of hits for speed up
        # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given
        for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=best_chunk_size):
            analyze_data.analyze_hits(hits, scan_parameter=False)  # analyze the selected hits in chunks
            readout_hit_len += hits.shape[0]
        best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size and int(1.05 * readout_hit_len) > 1e3 else chunk_size  # to increase the readout speed, estimated the number of hits for one read instruction
        # build a file-system safe name from the scan parameter names and their current values
        file_name = " ".join(re.findall("[a-zA-Z0-9]+", str(scan_parameters))) + '_' + " ".join(re.findall("[a-zA-Z0-9]+", str(parameter_values[parameter_index])))
        analyze_data._create_additional_hit_data(safe_to_file=False)
        analyze_data._create_additional_cluster_data(safe_to_file=False)
        yield analyze_data, file_name

    if close_file:
        in_hit_file_h5.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def reset_bunch_counter(self): '''Resetting Bunch Counter ''' logging.info('Resetting Bunch Counter') commands = [] commands.extend(self.register.get_commands("RunMode")) commands.extend(self.register.get_commands("BCR")) self.send_commands(commands) time.sleep(0.1) commands = [] commands.extend(self.register.get_commands("ConfMode")) self.send_commands(commands)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def generate_threshold_mask(hist): '''Masking array elements when equal 0.0 or greater than 10 times the median Parameters ---------- hist : array_like Input data. Returns ------- masked array Returns copy of the array with masked elements. ''' masked_array = np.ma.masked_values(hist, 0) masked_array = np.ma.masked_greater(masked_array, 10 * np.ma.median(hist)) logging.info('Masking %d pixel(s)', np.ma.count_masked(masked_array)) return np.ma.getmaskarray(masked_array)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_ranges_from_array(arr, append_last=True): '''Takes an array and calculates ranges [start, stop[. The last range end is none to keep the same length. Parameters ---------- arr : array like append_last: bool If True, append item with a pair of last array item and None. Returns ------- numpy.array The array formed by pairs of values by the given array. Example ------- >>> a = np.array((1,2,3,4)) >>> get_ranges_from_array(a, append_last=True) array([[1, 2], [2, 3], [3, 4], [4, None]]) >>> get_ranges_from_array(a, append_last=False) array([[1, 2], [2, 3], [3, 4]]) ''' right = arr[1:] if append_last: left = arr[:] right = np.append(right, None) else: left = arr[:-1] return np.column_stack((left, right))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def in1d_sorted(ar1, ar2):
    """ Does the same as np.in1d but uses the fact that ar1 and ar2 are sorted. Is therefore much faster. """
if ar1.shape[0] == 0 or ar2.shape[0] == 0: # check for empty arrays to avoid crash return [] inds = ar2.searchsorted(ar1) inds[inds == len(ar2)] = 0 return ar2[inds] == ar1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_parameter_value_from_file_names(files, parameters=None, unique=False, sort=True): """ Takes a list of files, searches for the parameter name in the file name and returns a ordered dict with the file name in the first dimension and the corresponding parameter value in the second. The file names can be sorted by the parameter value, otherwise the order is kept. If unique is true every parameter is unique and mapped to the file name that occurred last in the files list. Parameters files : list of strings parameter : string or list of strings unique : bool sort : bool Returns ------- collections.OrderedDict """
# unique=False logging.debug('Get the parameter: ' + str(parameters) + ' values from the file names of ' + str(len(files)) + ' files') files_dict = collections.OrderedDict() if parameters is None: # special case, no parameter defined return files_dict if isinstance(parameters, basestring): parameters = (parameters, ) search_string = '_'.join(parameters) for _ in parameters: search_string += r'_(-?\d+)' result = {} for one_file in files: parameter_values = re.findall(search_string, one_file) if parameter_values: if isinstance(parameter_values[0], tuple): parameter_values = list(reduce(lambda t1, t2: t1 + t2, parameter_values)) parameter_values = [[int(i), ] for i in parameter_values] # convert string value to list with int files_dict[one_file] = dict(zip(parameters, parameter_values)) if unique: # reduce to the files with different scan parameters for key, value in files_dict.items(): if value not in result.values(): result[key] = value else: result[one_file] = files_dict[one_file] return collections.OrderedDict(sorted(result.iteritems(), key=itemgetter(1)) if sort else files_dict)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data_file_names_from_scan_base(scan_base, filter_str=['_analyzed.h5', '_interpreted.h5', '_cut.h5', '_result.h5', '_hists.h5'], sort_by_time=True, meta_data_v2=True):
    """ Generate a list of .h5 files which have a similar file name.

    Parameters
    scan_base : list, string
        List of string or string of the scan base names. The scan_base will be used to search for files containing the string. The .h5 file extension will be added automatically.
    filter : list, string
        List of string or string which are used to filter the returned filenames. File names containing filter_str in the file name will not be returned. Use None to disable filter.
    sort_by_time : bool
        If True, return file name list sorted from oldest to newest. The time from meta table will be used to sort the files.
    meta_data_v2 : bool
        True for new (v2) meta data format, False for the old (v1) format.

    Returns
    -------
    data_files : list
        List of file names matching the above conditions.
    """
data_files = [] if scan_base is None: return data_files if isinstance(scan_base, basestring): scan_base = [scan_base] for scan_base_str in scan_base: if '.h5' == os.path.splitext(scan_base_str)[1]: data_files.append(scan_base_str) else: data_files.extend(glob.glob(scan_base_str + '*.h5')) if filter_str: if isinstance(filter_str, basestring): filter_str = [filter_str] data_files = filter(lambda data_file: not any([(True if x in data_file else False) for x in filter_str]), data_files) if sort_by_time and len(data_files) > 1: f_list = {} for data_file in data_files: with tb.open_file(data_file, mode="r") as h5_file: try: meta_data = h5_file.root.meta_data except tb.NoSuchNodeError: logging.warning("File %s is missing meta_data" % h5_file.filename) else: try: if meta_data_v2: timestamp = meta_data[0]["timestamp_start"] else: timestamp = meta_data[0]["timestamp"] except IndexError: logging.info("File %s has empty meta_data" % h5_file.filename) else: f_list[data_file] = timestamp data_files = list(sorted(f_list, key=f_list.__getitem__, reverse=False)) return data_files
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_parameter_from_files(files, parameters=None, unique=False, sort=True):
    ''' Takes a list of files, searches for the parameter name in the file name and in the file.
    Returns a ordered dict with the file name in the first dimension and the corresponding parameter values in the second.
    If a scan parameter appears in the file name and in the file the first parameter setting has to be in the file name, otherwise a warning is shown.
    The file names can be sorted by the first parameter value of each file.

    Parameters
    ----------
    files : string, list of strings
    parameters : string, list of strings
    unique : boolean
        If set only one file per scan parameter value is used.
    sort : boolean

    Returns
    -------
    collections.OrderedDict
    '''
    logging.debug('Get the parameter ' + str(parameters) + ' values from ' + str(len(files)) + ' files')
    files_dict = collections.OrderedDict()
    if isinstance(files, basestring):  # NOTE: Python 2 only (basestring, iteritems)
        files = (files, )
    if isinstance(parameters, basestring):
        parameters = (parameters, )
    parameter_values_from_file_names_dict = get_parameter_value_from_file_names(files, parameters, unique=unique, sort=sort)  # get the parameter from the file name
    for file_name in files:
        with tb.open_file(file_name, mode="r") as in_file_h5:  # open the actual file
            scan_parameter_values = collections.OrderedDict()
            try:
                scan_parameters = in_file_h5.root.scan_parameters[:]  # get the scan parameters from the scan parameter table
                if parameters is None:
                    parameters = get_scan_parameter_names(scan_parameters)
                for parameter in parameters:
                    try:
                        scan_parameter_values[parameter] = np.unique(scan_parameters[parameter]).tolist()  # different scan parameter values used
                    except ValueError:  # the scan parameter does not exists
                        pass
            except tb.NoSuchNodeError:  # scan parameter table does not exist
                try:
                    scan_parameters = get_scan_parameter(in_file_h5.root.meta_data[:])  # get the scan parameters from the meta data
                    if scan_parameters:
                        try:
                            scan_parameter_values = np.unique(scan_parameters[parameters]).tolist()  # different scan parameter values used
                        except ValueError:  # the scan parameter does not exists
                            pass
                except tb.NoSuchNodeError:  # meta data table does not exist
                    pass
            if not scan_parameter_values:  # if no scan parameter values could be set from file take the parameter found in the file name
                try:
                    scan_parameter_values = parameter_values_from_file_names_dict[file_name]
                except KeyError:  # no scan parameter found at all, neither in the file name nor in the file
                    scan_parameter_values = None
            else:  # use the parameter given in the file and cross check if it matches the file name parameter if these is given
                try:
                    for key, value in scan_parameter_values.items():
                        if value and value[0] != parameter_values_from_file_names_dict[file_name][key][0]:  # parameter value exists: check if the first value is the file name value
                            logging.warning('Parameter values in the file name and in the file differ. Take ' + str(key) + ' parameters ' + str(value) + ' found in %s.', file_name)
                except KeyError:  # parameter does not exists in the file name
                    pass
                except IndexError:
                    raise IncompleteInputError('Something wrong check!')
            if unique and scan_parameter_values is not None:
                existing = False
                for parameter in scan_parameter_values:  # loop to determine if any value of any scan parameter exists already
                    all_par_values = [values[parameter] for values in files_dict.values()]
                    if any(x in [scan_parameter_values[parameter]] for x in all_par_values):
                        existing = True
                        break
                if not existing:
                    files_dict[file_name] = scan_parameter_values
                else:
                    logging.warning('Scan parameter value(s) from %s exists already, do not add to result', file_name)
            else:
                files_dict[file_name] = scan_parameter_values
    return collections.OrderedDict(sorted(files_dict.iteritems(), key=itemgetter(1)) if sort else files_dict)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_parameter_similarity(files_dict): """ Checks if the parameter names of all files are similar. Takes the dictionary from get_parameter_from_files output as input. """
try: parameter_names = files_dict.itervalues().next().keys() # get the parameter names of the first file, to check if these are the same in the other files except AttributeError: # if there is no parameter at all if any(i is not None for i in files_dict.itervalues()): # check if there is also no parameter for the other files return False else: return True if any(parameter_names != i.keys() for i in files_dict.itervalues()): return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def combine_meta_data(files_dict, meta_data_v2=True): """ Takes the dict of hdf5 files and combines their meta data tables into one new numpy record array. Parameters meta_data_v2 : bool True for new (v2) meta data format, False for the old (v1) format. """
if len(files_dict) > 10: logging.info("Combine the meta data from %d files", len(files_dict)) # determine total length needed for the new combined array, thats the fastest way to combine arrays total_length = 0 # the total length of the new table for file_name in files_dict.iterkeys(): with tb.open_file(file_name, mode="r") as in_file_h5: # open the actual file total_length += in_file_h5.root.meta_data.shape[0] if meta_data_v2: meta_data_combined = np.empty((total_length, ), dtype=[ ('index_start', np.uint32), ('index_stop', np.uint32), ('data_length', np.uint32), ('timestamp_start', np.float64), ('timestamp_stop', np.float64), ('error', np.uint32)]) else: meta_data_combined = np.empty((total_length, ), dtype=[ ('start_index', np.uint32), ('stop_index', np.uint32), ('length', np.uint32), ('timestamp', np.float64), ('error', np.uint32)]) if len(files_dict) > 10: progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=total_length, term_width=80) progress_bar.start() index = 0 # fill actual result array for file_name in files_dict.iterkeys(): with tb.open_file(file_name, mode="r") as in_file_h5: # open the actual file array_length = in_file_h5.root.meta_data.shape[0] meta_data_combined[index:index + array_length] = in_file_h5.root.meta_data[:] index += array_length if len(files_dict) > 10: progress_bar.update(index) if len(files_dict) > 10: progress_bar.finish() return meta_data_combined
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reduce_sorted_to_intersect(ar1, ar2): """ Takes two sorted arrays and return the intersection ar1 in ar2, ar2 in ar1. Parameters ar1 : (M,) array_like Input array. ar2 : array_like Input array. Returns ------- ar1, ar1 : ndarray, ndarray The intersection values. """
# Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # get min max values of the arrays ar1_biggest_value = ar1[-1] ar1_smallest_value = ar1[0] ar2_biggest_value = ar2[-1] ar2_smallest_value = ar2[0] if ar1_biggest_value < ar2_smallest_value or ar1_smallest_value > ar2_biggest_value: # special case, no intersection at all return ar1[0:0], ar2[0:0] # get min/max indices with values that are also in the other array min_index_ar1 = np.argmin(ar1 < ar2_smallest_value) max_index_ar1 = np.argmax(ar1 > ar2_biggest_value) min_index_ar2 = np.argmin(ar2 < ar1_smallest_value) max_index_ar2 = np.argmax(ar2 > ar1_biggest_value) if min_index_ar1 < 0: min_index_ar1 = 0 if min_index_ar2 < 0: min_index_ar2 = 0 if max_index_ar1 == 0 or max_index_ar1 > ar1.shape[0]: max_index_ar1 = ar1.shape[0] if max_index_ar2 == 0 or max_index_ar2 > ar2.shape[0]: max_index_ar2 = ar2.shape[0] # reduce the data return ar1[min_index_ar1:max_index_ar1], ar2[min_index_ar2:max_index_ar2]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_not_unique_values(array): '''Returns the values that appear at least twice in array. Parameters ---------- array : array like Returns ------- numpy.array ''' s = np.sort(array, axis=None) s = s[s[1:] == s[:-1]] return np.unique(s)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_meta_data_index_at_scan_parameter(meta_data_array, scan_parameter_name): '''Takes the analyzed meta_data table and returns the indices where the scan parameter changes Parameters ---------- meta_data_array : numpy.recordarray scan_parameter_name : string Returns ------- numpy.ndarray: first dimension: scan parameter value second dimension: index where scan parameter value was used first ''' scan_parameter_values = meta_data_array[scan_parameter_name] diff = np.concatenate(([1], np.diff(scan_parameter_values))) idx = np.concatenate((np.where(diff)[0], [len(scan_parameter_values)])) index = np.empty(len(idx) - 1, dtype={'names': [scan_parameter_name, 'index'], 'formats': ['u4', 'u4']}) index[scan_parameter_name] = scan_parameter_values[idx[:-1]] index['index'] = idx[:-1] return index
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_hits_in_events(hits_array, events, assume_sorted=True, condition=None): '''Selects the hits that occurred in events and optional selection criterion. If a event range can be defined use the get_data_in_event_range function. It is much faster. Parameters ---------- hits_array : numpy.array events : array assume_sorted : bool Is true if the events to select are sorted from low to high value. Increases speed by 35%. condition : string A condition that is applied to the hits in numexpr. Only if the expression evaluates to True the hit is taken. Returns ------- numpy.array hit array with the hits in events. ''' logging.debug("Calculate hits that exists in the given %d events." % len(events)) if assume_sorted: events, _ = reduce_sorted_to_intersect(events, hits_array['event_number']) # reduce the event number range to the max min event number of the given hits to save time if events.shape[0] == 0: # if there is not a single selected hit return hits_array[0:0] try: if assume_sorted: selection = analysis_utils.in1d_events(hits_array['event_number'], events) else: logging.warning('Events are usually sorted. Are you sure you want this?') selection = np.in1d(hits_array['event_number'], events) if condition is None: hits_in_events = hits_array[selection] else: # bad hack to be able to use numexpr for variable in set(re.findall(r'[a-zA-Z_]+', condition)): exec(variable + ' = hits_array[\'' + variable + '\']') hits_in_events = hits_array[ne.evaluate(condition + ' & selection')] except MemoryError: logging.error('There are too many hits to do in RAM operations. Consider decreasing chunk size and use the write_hits_in_events function instead.') raise MemoryError return hits_in_events
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_hits_of_scan_parameter(input_file_hits, scan_parameters=None, try_speedup=False, chunk_size=10000000):
    '''Takes the hit table of a hdf5 file and returns hits in chunks for each unique combination of scan_parameters.
    Yields the hits in chunks, since they usually do not fit into memory.

    Parameters
    ----------
    input_file_hits : pytable hdf5 file
        Has to include a hits node
    scan_parameters : iterable with strings
    try_speedup : bool
        If true a speed up by searching for the event numbers in the data is done. If the event numbers are not in the data this slows down the search.
    chunk_size : int
        How many rows of data are read into ram.

    Returns
    -------
    Yields tuple, numpy.array
        Actual scan parameter tuple, hit array with the hits of a chunk of the given scan parameter tuple
    '''
    with tb.open_file(input_file_hits, mode="r+") as in_file_h5:
        hit_table = in_file_h5.root.Hits
        meta_data = in_file_h5.root.meta_data[:]
        meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameters)
        parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameters)
        event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number'])  # get the event number ranges for the different scan parameter settings
        index_event_number(hit_table)  # create a event_numer index to select the hits by their event number fast, no needed but important for speed up

        # variables for read speed up
        index = 0  # index where to start the read out of the hit table, 0 at the beginning, increased during looping
        best_chunk_size = chunk_size  # number of hits to copy to RAM during looping, the optimal chunk size is determined during looping

        # loop over the selected events; one iteration per scan parameter combination
        for parameter_index, (start_event_number, stop_event_number) in enumerate(event_number_ranges):
            logging.debug('Read hits for ' + str(scan_parameters) + ' = ' + str(parameter_values[parameter_index]))
            readout_hit_len = 0  # variable to calculate a optimal chunk size value from the number of hits for speed up
            # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given
            for hits, index in data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, try_speedup=try_speedup, chunk_size=best_chunk_size):
                yield parameter_values[parameter_index], hits
                readout_hit_len += hits.shape[0]
            best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size and int(1.05 * readout_hit_len) > 1e3 else chunk_size  # to increase the readout speed, estimated the number of hits for one read instruction
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write_hits_in_events(hit_table_in, hit_table_out, events, start_hit_word=0, chunk_size=5000000, condition=None): '''Selects the hits that occurred in events and writes them to a pytable. This function reduces the in RAM operations and has to be used if the get_hits_in_events function raises a memory error. Also a condition can be set to select hits. Parameters ---------- hit_table_in : pytable.table hit_table_out : pytable.table functions need to be able to write to hit_table_out events : array like defines the events to be written from hit_table_in to hit_table_out. They do not have to exists at all. start_hit_word: int Index of the first hit word to be analyzed. Used for speed up. chunk_size : int defines how many hits are analyzed in RAM. Bigger numbers increase the speed, too big numbers let the program crash with a memory error. condition : string A condition that is applied to the hits in numexpr style. Only if the expression evaluates to True the hit is taken. Returns ------- start_hit_word: int Index of the last hit word analyzed. Used to speed up the next call of write_hits_in_events. ''' if len(events) > 0: # needed to avoid crash min_event = np.amin(events) max_event = np.amax(events) logging.debug("Write hits from hit number >= %d that exists in the selected %d events with %d <= event number <= %d into a new hit table." % (start_hit_word, len(events), min_event, max_event)) table_size = hit_table_in.shape[0] iHit = 0 for iHit in range(start_hit_word, table_size, chunk_size): hits = hit_table_in.read(iHit, iHit + chunk_size) last_event_number = hits[-1]['event_number'] hit_table_out.append(get_hits_in_events(hits, events=events, condition=condition)) if last_event_number > max_event: # speed up, use the fact that the hits are sorted by event_number return iHit return start_hit_word
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write_hits_in_event_range(hit_table_in, hit_table_out, event_start=None, event_stop=None, start_hit_word=0, chunk_size=5000000, condition=None): '''Selects the hits that occurred in given event range [event_start, event_stop[ and write them to a pytable. This function reduces the in RAM operations and has to be used if the get_data_in_event_range function raises a memory error. Also a condition can be set to select hits. Parameters ---------- hit_table_in : pytable.table hit_table_out : pytable.table functions need to be able to write to hit_table_out event_start, event_stop : int, None start/stop event numbers. Stop event number is excluded. If None start/stop is set automatically. chunk_size : int defines how many hits are analyzed in RAM. Bigger numbers increase the speed, too big numbers let the program crash with a memory error. condition : string A condition that is applied to the hits in numexpr style. Only if the expression evaluates to True the hit is taken. Returns ------- start_hit_word: int Index of the last hit word analyzed. Used to speed up the next call of write_hits_in_events. ''' logging.debug('Write hits that exists in the given event range from + ' + str(event_start) + ' to ' + str(event_stop) + ' into a new hit table') table_size = hit_table_in.shape[0] for iHit in range(0, table_size, chunk_size): hits = hit_table_in.read(iHit, iHit + chunk_size) last_event_number = hits[-1]['event_number'] selected_hits = get_data_in_event_range(hits, event_start=event_start, event_stop=event_stop) if condition is not None: # bad hack to be able to use numexpr for variable in set(re.findall(r'[a-zA-Z_]+', condition)): exec(variable + ' = hits[\'' + variable + '\']') selected_hits = selected_hits[ne.evaluate(condition)] hit_table_out.append(selected_hits) if last_event_number > event_stop: # speed up, use the fact that the hits are sorted by event_number return iHit + chunk_size return start_hit_word
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_events_with_n_cluster(event_number, condition='n_cluster==1'): '''Selects the events with a certain number of cluster. Parameters ---------- event_number : numpy.array Returns ------- numpy.array ''' logging.debug("Calculate events with clusters where " + condition) n_cluster_in_events = analysis_utils.get_n_cluster_in_events(event_number) n_cluster = n_cluster_in_events[:, 1] # return np.take(n_cluster_in_events, ne.evaluate(condition), axis=0) # does not return 1d, bug? return n_cluster_in_events[ne.evaluate(condition), 0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_events_with_cluster_size(event_number, cluster_size, condition='cluster_size==1'):
    '''Return the unique event numbers of events containing a cluster that matches a size condition.

    Parameters
    ----------
    event_number : numpy.array
    cluster_size : numpy.array
        Per-cluster size, aligned with event_number; ne.evaluate reads it by name from this frame.
    condition : string
        numexpr expression on ``cluster_size``.

    Returns
    -------
    numpy.array
        Sorted unique event numbers of the matching clusters.
    '''
    logging.debug("Calculate events with clusters with " + condition)
    selection = ne.evaluate(condition)
    return np.unique(event_number[selection])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_events_with_error_code(event_number, event_status, select_mask=0b1111111111111111, condition=0b0000000000000000):
    '''Return the unique event numbers whose masked status word equals a given value.

    Parameters
    ----------
    event_number : numpy.array
    event_status : numpy.array
        Per-event status/error word, aligned with event_number.
    select_mask : int
        Bit mask selecting which status bits to compare.
    condition : int
        Required value of the selected bits.

    Returns
    -------
    numpy.array
        Sorted unique event numbers fulfilling the error-code condition.
    '''
    logging.debug("Calculate events with certain error code")
    masked_status = event_status & select_mask
    return np.unique(event_number[masked_status == condition])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_scan_parameter(meta_data_array, unique=True):
    '''Extract the scan parameter columns of a meta data array as an ordered name -> values mapping.

    Parameters
    ----------
    meta_data_array : numpy.ndarray
        Structured meta data; scan parameter columns start at index 4, after the
        'error_code' (interpreted data) or 'error' (raw data) column.
    unique : boolean
        If True only the unique values of each scan parameter are returned.

    Returns
    -------
    collections.OrderedDict or None
        Scan parameter name/values pairs; None when no scan parameter columns exist.
    '''
    names = meta_data_array.dtype.names
    try:
        error_index = names.index('error_code')  # interpreted meta data
    except ValueError:
        error_index = names.index('error')  # raw data file meta data
    if error_index == len(names) - 1:  # error column is last -> no scan parameters present
        return
    scan_parameters = collections.OrderedDict()
    for name in names[4:]:  # scan parameters occupy column 5 (= index 4) and above
        values = meta_data_array[name]
        scan_parameters[name] = np.unique(values) if unique else values
    return scan_parameters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_unique_scan_parameter_combinations(meta_data_array, scan_parameters=None, scan_parameter_columns_only=False):
    '''Return the first meta data rows with unique combinations of the selected scan parameter values.

    Parameters
    ----------
    meta_data_array : numpy.ndarray
        Structured meta data with an 'error_code' (interpreted) or 'error' (raw) column;
        scan parameter columns start at index 4.
    scan_parameters : list of string, None
        Scan parameter names to use. If None all scan parameter columns are used.
    scan_parameter_columns_only : bool
        If True the returned histogram only contains the selected columns.

    Returns
    -------
    numpy.Histogram or None
        None when the meta data has no scan parameter columns.

    Raises
    ------
    RuntimeError
        If a requested scan parameter name is not a column of meta_data_array.
    '''
    names = meta_data_array.dtype.names
    try:
        error_index = names.index('error_code')  # interpreted meta data
    except ValueError:
        error_index = names.index('error')  # raw data file meta data
    if error_index == len(names) - 1:  # no scan parameter columns present
        return
    if scan_parameters is None:
        columns = range(4, len(names))
    else:
        columns = []
        for parameter in scan_parameters:
            try:
                columns.append(names.index(parameter))
            except ValueError:
                logging.error('No scan parameter ' + parameter + ' found')
                raise RuntimeError('Scan parameter not found')
    return unique_row(meta_data_array, use_columns=columns, selected_columns_only=scan_parameter_columns_only)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def select_good_pixel_region(hits, col_span, row_span, min_cut_threshold=0.2, max_cut_threshold=2.0):
    '''Takes the hit array and masks all pixels with a certain occupancy.

    Parameters
    ----------
    hits : array like
        Per-pixel hit counts with the first two axes being (column, row) = (80, 336).
        If dim > 2 the additional dimensions are summed up.
    col_span, row_span : iterable of int
        Column/row index ranges of the region of interest; pixels outside are always masked.
    min_cut_threshold : float
        A number to specify the minimum threshold, which pixel to take. Pixels are
        masked if occupancy < min_cut_threshold * np.ma.median(occupancy)
        0 means that no pixels are masked
    max_cut_threshold : float
        A number to specify the maximum threshold, which pixel to take. Pixels are
        masked if occupancy > max_cut_threshold * np.ma.median(occupancy)
        Can be set to None that no pixels are masked by max_cut_threshold

    Returns
    -------
    numpy.ma.array, shape=(80,336)
        The hits array with masked pixels.
    '''
    hits = np.asarray(hits)
    # BUG FIX: the old code summed only the last axis, which collapsed a 2-D hits array
    # to shape (80,) and left >3-D input unreduced; sum all trailing axes instead
    # (identical result for the common 3-D case).
    if hits.ndim > 2:
        hits = np.sum(hits, axis=tuple(range(2, hits.ndim)))
    hits = hits.astype('u8')
    # Mask everything outside the selected column/row span.
    mask = np.ones(shape=(80, 336), dtype=np.uint8)
    mask[min(col_span):max(col_span) + 1, min(row_span):max(row_span) + 1] = 0
    ma = np.ma.masked_where(mask, hits)
    if max_cut_threshold is not None:
        return np.ma.masked_where(np.logical_or(ma < min_cut_threshold * np.ma.median(ma), ma > max_cut_threshold * np.ma.median(ma)), ma)
    else:
        return np.ma.masked_where(ma < min_cut_threshold * np.ma.median(ma), ma)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_hit_rate_correction(gdacs, calibration_gdacs, cluster_size_histogram):
    '''Calculates a correction factor for single hit clusters at the given GDACs
    from the cluster_size_histogram via cubic interpolation.

    Parameters
    ----------
    gdacs : array like
        The GDAC settings where the correction factor should be evaluated.
    calibration_gdacs : array like
        GDAC settings used during the source scan for the cluster size calibration.
    cluster_size_histogram : numpy.array, shape=(# of GDACs during calibration, # of cluster sizes)
        Cluster size histogram per calibration GDAC; column 1 holds the single hit cluster counts.

    Returns
    -------
    numpy.array, shape=(len(gdacs),)
        The correction factor at each requested GDAC.

    Raises
    ------
    ValueError
        If calibration_gdacs does not match the first dimension of cluster_size_histogram.
    '''
    logging.info('Calculate the correction factor for the single hit cluster rate at %d given GDAC settings', len(gdacs))
    if len(calibration_gdacs) != cluster_size_histogram.shape[0]:
        raise ValueError('Length of the provided pixel GDACs does not match the dimension of the cluster size array')
    # Normalize each GDAC row to percent, then take the single-hit-cluster column.
    hist_sum = np.sum(cluster_size_histogram, axis=1)
    hist_rel = cluster_size_histogram / hist_sum[:, np.newaxis].astype('f4') * 100.
    single_hit_rate = hist_rel[:, 1]
    correction_factor = np.amax(single_hit_rate) / single_hit_rate
    # interp1d needs monotonically increasing x values, so sort both arrays together.
    gdac_array = np.array(calibration_gdacs)
    order = np.argsort(gdac_array)
    interpolation = interp1d(np.sort(gdac_array).tolist(), correction_factor[order].tolist(), kind='cubic', bounds_error=True)
    return interpolation(gdacs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_mean_threshold_from_calibration(gdac, mean_threshold_calibration):
    '''Calculates the mean threshold at the given GDAC setting(s) from the threshold
    calibration. GDAC values not used during calibration are linearly interpolated.

    Parameters
    ----------
    gdac : array like
        The GDAC settings where the threshold should be determined from the calibration.
    mean_threshold_calibration : pytable
        The table created during the calibration scan; read via the
        'parameter_value' and 'mean_threshold' columns.

    Returns
    -------
    numpy.array, shape=(len(gdac), )
        The mean threshold values at each value in gdac.
    '''
    parameter_values = mean_threshold_calibration['parameter_value']
    mean_thresholds = mean_threshold_calibration['mean_threshold']
    interpolation = interp1d(parameter_values, mean_thresholds, kind='slinear', bounds_error=True)
    return interpolation(gdac)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_pixel_thresholds_from_calibration_array(gdacs, calibration_gdacs, threshold_calibration_array, bounds_error=True):
    '''Calculates the threshold for all pixels in threshold_calibration_array at the given
    GDAC settings via linear interpolation. The GDAC settings used during calibration
    have to be given.

    Parameters
    ----------
    gdacs : array like
        The GDAC settings where the threshold should be determined from the calibration.
    calibration_gdacs : array like
        GDAC settings used during calibration, needed to translate the index of the
        calibration array to a value.
    threshold_calibration_array : numpy.array, shape=(80,336,# of GDACs during calibration)
        The calibration array.
    bounds_error : bool
        Forwarded to scipy's interp1d; if False, out-of-range GDACs are extrapolated/NaN
        instead of raising.

    Returns
    -------
    numpy.array, shape=(80,336,# gdacs given)
        The threshold values for each pixel at gdacs.

    Raises
    ------
    ValueError
        If calibration_gdacs does not match the third dimension of the calibration array.
    '''
    n_calibration_points = threshold_calibration_array.shape[2]
    if n_calibration_points != len(calibration_gdacs):
        raise ValueError('Length of the provided pixel GDACs does not match the third dimension of the calibration array')
    # interp1d interpolates along the last axis by default, i.e. over the GDAC dimension.
    threshold_interpolator = interp1d(x=calibration_gdacs, y=threshold_calibration_array, kind='slinear', bounds_error=bounds_error)
    return threshold_interpolator(gdacs)