Source: HDFSSelectableResultsTable.py
#
# Splunk UI module python renderer
# This module is imported by the module loader (lib.module.ModuleMapper) into
# the splunk.appserver.mrsparkle.controllers.module.* namespace.
#

# required imports
import cherrypy
import controllers.module as module

# common imports
import splunk, splunk.search, splunk.util, splunk.entity
import lib.util as util
import lib.i18n as i18n

# logging setup
import logging
logger = logging.getLogger('splunk.appserver.controllers.module.SimpleResultsTable')

import math
import cgi
import decimal

# define standard time field names
TIME_FIELD = '_time'
RAW_FIELD = '_raw'

# define wrapper for rendering multi-value fields
MULTI_VALUE_WRAPPER = '<div class="mv">%s</div>'

# define hard limits for displaying multi-value fields
MAX_MULTI_VALUE_COUNT = 50
MAX_SPARKLINE_MV_COUNT = 101

class HDFSSelectableResultsTable(module.ModuleHandler):
    '''
    Provides module content for the SimpleResultsTable. The arguments supported
    are any params supported by the /splunk/search/jobs/<sid>/results endpoint, e.g.:

        count
        offset
        field_list
        search
    '''

    def generateResults(self, host_app, client_app, sid, count=1000,
                        earliest_time=None, latest_time=None, field_list=None,
                        offset=0, max_lines=None, reverse_order=0, entity_name='results',
                        postprocess=None, display_row_numbers='True', show_preview='0',
                        mark_interactive=None, sortField=None, sortDir=None,
                        selectLabel="Show", selectField=None):

        # check inputs
        count = max(int(count), 0)
        offset = max(int(offset), 0)
        display_row_numbers = splunk.util.normalizeBoolean(display_row_numbers)
        if not sid:
            raise Exception('SimpleResultsTable.generateResults - sid not passed!')

        job = splunk.search.JobLite(sid)

        # pass in any field list
        if field_list:
            job.setFetchOption(fieldList=field_list, show_empty_fields=False)
        if postprocess:
            job.setFetchOption(search=postprocess)
        if splunk.util.normalizeBoolean(show_preview) and entity_name == 'results':
            entity_name = 'results_preview'

        # set formatting
        job.setFetchOption(
            time_format=cherrypy.config.get('DISPATCH_TIME_FORMAT'),
            earliestTime=earliest_time,
            latestTime=latest_time,
            output_time_format=i18n.ISO8609_MICROTIME
        )

        # build output
        shash = hash(sid)
        output = []
        output.append('<div class="simpleResultsTableWrapper">')
        output.append('<table class="simpleResultsTable splTable')
        if mark_interactive:
            output.append(' enableMouseover')
        output.append('">')

        offset_start = offset
        # these two lines are a noop, since offset = max(int(offset), 0)
        #if offset < 0 and count < abs(offset):
        #    offset_start = -count

        rs = job.getResults(entity_name, offset, count)
        if rs is None:
            return _('<p class="resultStatusMessage">The job appears to have expired or has been canceled. Splunk could not retrieve data for this search.</p>')

        # displayable fields; explicitly pull the _time field into the first column and _raw into the last
        fieldNames = [x for x in rs.fieldOrder() if (not x.startswith('_') or x in (TIME_FIELD, RAW_FIELD))]
        #fieldNames = [x for x in getattr(job, entity_name).fieldOrder if (not x.startswith('_') or x == TIME_FIELD)]
        try:
            timePos = fieldNames.index(TIME_FIELD)
            fieldNames.pop(timePos)
            fieldNames.insert(0, TIME_FIELD)
        except ValueError:
            pass
        try:
            rawPos = fieldNames.index(RAW_FIELD)
            fieldNames.pop(rawPos)
            fieldNames.append(RAW_FIELD)
        except ValueError:
            pass

        #dataset = getattr(job, entity_name)[offset_start: offset+count]
        dataset = rs.results()

        # the client will request reverse_order=1 when it has determined
        # that we're in the special case of the 'mostly-backwards' sort order of real-time search.
        # (we reverse it manually so it appears to the user 'mostly correct'.)
        # (yes, for the pedantic, correct just means "consistent with historical searches, with latest events on page 1")
        #
        # NOTE: the arithmetic of the offset and count with respect to eventAvailableCounts will
        # already have been done on the client. This literally just reverses the sort order of
        # the N events we're actually being asked to return.
        if splunk.util.normalizeBoolean(reverse_order):
            dataset.reverse()

        # determine the relative values for heatmapping
        localMin = 0
        localMax = 0
        adjustedMaxDelta = 0
        ordered = []
        for row in dataset:
            if 'TOTAL' not in row.values():
                for fieldName in fieldNames:
                    if fieldName != 'TOTAL':
                        try:
                            ordered.append(float(row[fieldName][0].value))
                        except:
                            pass
        ordered.sort()
        if len(ordered):
            localMin = ordered[0]
            localMax = ordered[-1]
            # bracket min/max to the 5th/95th percentile
            adjustedMin, adjustedMax = util.getPercentiles(ordered, .05, .95)
            adjustedMaxDelta = max(abs(adjustedMin), abs(adjustedMax))
            logger.debug('SimpleResultsTable - localMin=%s localMax=%s adjustedMin=%s adjustedMax=%s' % (localMin, localMax, adjustedMin, adjustedMax))

        # generate headers
        output.append('<thead><tr>')
        if display_row_numbers:
            output.append('<th class="pos"></th>')
        output.append('<th class="selectable"> %s </th>' % cgi.escape(selectLabel))
        for field in fieldNames:
            output.append('<th><span class="">%s</span> <span class=""></span></th>' % cgi.escape(field))
        output.append('</tr></thead><tbody>')

        # generate data
        for i, result in enumerate(dataset):
            # check for the TOTAL row (not column)
            omitFromHeatmap = False
            for val in result.values():
                for item in val:
                    try:
                        if 'TOTAL' in item.getValue():
                            omitFromHeatmap = True
                            break
                    except:
                        pass

            rowClass = []
            if i % 5 == 4:
                rowClass.append('s')
            output.append('<tr class="%s">' % ' '.join(rowClass))
            if display_row_numbers:
                output.append('<td class="pos">%s</td>' % i18n.format_number(i + 1 + offset))

            selectableValue = result.get(selectField, [None])[0].value
            output.append('<td class="selectable selectable-value"><input data-selectable-value="%s" type="checkbox" checked="checked" /></td>'
                % cgi.escape(selectableValue).replace('"', '&quot;'))

            for field in fieldNames:
                output.append('<td')
                heatValue = None
                isMaxValue = False
                isMinValue = False
                if not omitFromHeatmap and field != 'TOTAL':
                    v = result.get(field, None)
                    try:
                        v = float(v[0].value)
                        heatValue = min(max(math.ceil(v / adjustedMaxDelta * 1000) / 1000, -1), 1)
                        if v == localMax: isMaxValue = True
                        if v == localMin: isMinValue = True
                    except:
                        pass
                if heatValue is not None:
                    output.append(' heat="%s"' % heatValue)
                if isMaxValue:
                    output.append(' isMax="1"')
                if isMinValue:
                    output.append(' isMin="1"')
                if mark_interactive and (field != "NULL" and field != "OTHER"):
                    output.append(' class="d"')

                fieldValues = result.get(field, None)

                # _time is a special case, because:
                # a) we need to localize the formatting.
                # b) for drilldown logic we need to pass down epochtime values for
                #    both start and end. These are passed as attributes.
                if field == TIME_FIELD and result.time:
                    startTime = splunk.util.dt2epoch(splunk.util.parseISO(str(result.time)))
                    output.append(' startTime="' + str(startTime) + '"')
                    duration = fieldValues = result.get("_span", None)
                    if duration and duration[0].value != "":
                        endTime = startTime + decimal.Decimal(duration[0].value)
                        output.append(' endTime="' + str(endTime) + '"')
                    output.append('>%s</td>' % i18n.format_datetime_microseconds(result.time))

                elif field == RAW_FIELD and isinstance(fieldValues, splunk.search.RawEvent):
                    output.append(' field="%s">' % cgi.escape(field))
                    output.append(cgi.escape(fieldValues.getRaw()))
                    output.append('</td>')

                # render field values; multi-value as a list
                # cap display count to prevent blowout
                elif fieldValues:
                    output.append(' field="%s">' % cgi.escape(field))
                    if len(fieldValues) > 1 and fieldValues[0].value == "##__SPARKLINE__##":
                        isSparklines = True
                        fieldValues = fieldValues[1:]
                    else:
                        isSparklines = False
                    output.append('<span%s>' % (' class="sparklines"' if isSparklines else ''))
                    if isSparklines:
                        renderedValues = [cgi.escape(x.value) for x in fieldValues[:MAX_SPARKLINE_MV_COUNT]]
                    else:
                        renderedValues = [cgi.escape(x.value) for x in fieldValues[:MAX_MULTI_VALUE_COUNT]]
                    if not isSparklines and len(fieldValues) > MAX_MULTI_VALUE_COUNT:
                        clipCount = len(fieldValues) - MAX_MULTI_VALUE_COUNT
                        renderedValues.append(_('[and %d more values]') % clipCount)
                    # when we have multi-valued fields, wrap each value in its own div element
                    if len(renderedValues) > 1 or isSparklines:
                        multiValueStr = [MULTI_VALUE_WRAPPER % x for x in renderedValues]
                        output.append("".join(multiValueStr))
                    # however, for single values the extra div is unwanted.
                    else:
                        output.append("".join(renderedValues))
                    output.append('</span></td>')

                else:
                    output.append('></td>')

            output.append('</tr>')

        output.append('</tbody><tfoot><tr class="selection-toggle-row">')
        if display_row_numbers:
            output.append('<td class="pos"></td>')
        output.append('<td colspan="%d" class="toggle-all"><a class="select-all" href="#">All</a> / <a class="select-none" href="#">None</a></td>' % (len(fieldNames) + 1))
        output.append('</tr></tfoot>')
        output.append('</table></div>')

        if len(dataset) == 0:
            # See SPL-55554 and SPL-55567: the results preview sometimes doesn't report a preview of the results,
            # so assume that entity_name == 'results_preview' means we are doing a preview
            if rs.isPreview() or entity_name == 'results_preview':
                output = self.generateStatusMessage(entity_name, 'waiting', sid)
            else:
                output = self.generateStatusMessage(entity_name, 'nodata', sid)
        else:
            output = ''.join(output)
        ...
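A note on the heatmapping pass above: it collects every numeric cell, brackets the sorted values to the 5th/95th percentile, and scales each cell into [-1, 1] for the heat attribute. The sketch below is a minimal standalone rendering of that math; get_percentiles is a simplified stand-in for lib.util.getPercentiles, whose implementation isn't shown in this snippet.

import math

def get_percentiles(ordered, low, high):
    # Simplified stand-in for lib.util.getPercentiles (not shown in the
    # snippet): index a sorted list at the given fractional ranks.
    n = len(ordered)
    return (ordered[min(int(low * (n - 1)), n - 1)],
            ordered[min(int(high * (n - 1)), n - 1)])

def heat_values(values):
    # Mirrors the table's heatmap math: bracket to the 5th/95th percentile,
    # take the larger magnitude as the scale, then clamp each scaled value
    # to [-1, 1] with three decimals of resolution (math.ceil rounds up).
    ordered = sorted(values)
    adjusted_min, adjusted_max = get_percentiles(ordered, .05, .95)
    adjusted_max_delta = max(abs(adjusted_min), abs(adjusted_max))
    if adjusted_max_delta == 0:
        return [0.0 for _ in values]  # the module's bare except hides this case
    return [min(max(math.ceil(v / adjusted_max_delta * 1000) / 1000, -1), 1)
            for v in values]

# an outlier beyond the 95th percentile simply clamps to 1.0
print(heat_values(list(range(-5, 15)) + [1000]))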
Source: HDFSSimpleFileTable.py
#
# Splunk UI module python renderer
# This module is imported by the module loader (lib.module.ModuleMapper) into
# the splunk.appserver.mrsparkle.controllers.module.* namespace.
#

# required imports
import cherrypy
import controllers.module as module

# common imports
import splunk, splunk.search, splunk.util, splunk.entity
import lib.util as util
import lib.i18n as i18n

# logging setup
import logging
logger = logging.getLogger('splunk.appserver.controllers.module.HDFSSimpleFileTable')

import math
import cgi
import re

# define standard time field names
TIME_FIELD = '_time'
RAW_FIELD = '_raw'

# define wrapper for rendering multi-value fields
MULTI_VALUE_WRAPPER = '<div class="mv">%s</div>'

# define hard limits for displaying multi-value fields
MAX_MULTI_VALUE_COUNT = 50
MAX_SPARKLINE_MV_COUNT = 101

class HDFSSimpleFileTable(module.ModuleHandler):
    '''
    Provides module content for the SimpleResultsTable. The arguments supported
    are any params supported by the /splunk/search/jobs/<sid>/results endpoint, e.g.:

        count
        offset
        field_list
        search
    '''

    def generateResults(self, host_app, client_app, sid, count=1000,
                        earliest_time=None, latest_time=None, field_list=None,
                        offset=0, max_lines=None, reverse_order=0, entity_name='results',
                        postprocess=None, display_row_numbers='True', show_preview='0',
                        mark_interactive=None, sortField=None, sortDir=None):

        # check inputs
        count = max(int(count), 0)
        offset = max(int(offset), 0)
        display_row_numbers = splunk.util.normalizeBoolean(display_row_numbers)
        if not sid:
            raise Exception('SimpleResultsTable.generateResults - sid not passed!')

        job = splunk.search.JobLite(sid)

        # pass in any field list
        if field_list:
            job.setFetchOption(fieldList=field_list, show_empty_fields=False)
        if postprocess:
            job.setFetchOption(search=postprocess)
        if splunk.util.normalizeBoolean(show_preview) and entity_name == 'results':
            entity_name = 'results_preview'

        # set formatting
        job.setFetchOption(
            time_format=cherrypy.config.get('DISPATCH_TIME_FORMAT'),
            earliestTime=earliest_time,
            latestTime=latest_time,
            output_time_format=i18n.ISO8609_MICROTIME
        )

        offset_start = offset
        # these two lines are a noop, since offset = max(int(offset), 0)
        #if offset < 0 and count < abs(offset):
        #    offset_start = -count

        rs = job.getResults(entity_name, offset, count)
        if rs is None:
            return _('<p class="resultStatusMessage">The job appears to have expired or has been canceled. Splunk could not retrieve data for this search.</p>')

        #dataset = getattr(job, entity_name)[offset_start: offset+count]
        dataset = rs.results()

        # if we don't have anything... let's get out fast!
        if len(dataset) == 0:
            if rs.isPreview():
                return self.generateStatusMessage(entity_name, 'waiting', sid)
            else:
                return self.generateStatusMessage(entity_name, 'nodata', sid)

        # displayable fields; explicitly pull the _time field into the first column and _raw into the last
        fieldNames = [x for x in rs.fieldOrder() if (not x.startswith('_') or x in (TIME_FIELD, RAW_FIELD))]
        #fieldNames = [x for x in getattr(job, entity_name).fieldOrder if (not x.startswith('_') or x == TIME_FIELD)]
        try:
            timePos = fieldNames.index(TIME_FIELD)
            fieldNames.pop(timePos)
            fieldNames.insert(0, TIME_FIELD)
        except ValueError:
            pass
        try:
            rawPos = fieldNames.index(RAW_FIELD)
            fieldNames.pop(rawPos)
            fieldNames.append(RAW_FIELD)
        except ValueError:
            pass
        try:
            locationPos = fieldNames.index('Location')
            fieldNames.pop(locationPos)
        except ValueError:
            pass

        # the client will request reverse_order=1 when it has determined
        # that we're in the special case of the 'mostly-backwards' sort order of real-time search.
        # (we reverse it manually so it appears to the user 'mostly correct'.)
        # (yes, for the pedantic, correct just means "consistent with historical searches, with latest events on page 1")
        #
        # NOTE: the arithmetic of the offset and count with respect to eventAvailableCounts will
        # already have been done on the client. This literally just reverses the sort order of
        # the N events we're actually being asked to return.
        if splunk.util.normalizeBoolean(reverse_order):
            dataset.reverse()

        # ------------------------- build output ---------------------------#
        shash = hash(sid)
        output = []
        output.append('<div class="simpleResultsTableWrapper">')
        output.append('<table class="simpleResultsTable splTable')
        if mark_interactive:
            output.append(' enableMouseover')
        output.append('">')

        # generate headers
        output.append('<tr>')
        if display_row_numbers:
            output.append('<th class="pos"></th>')
        for field in fieldNames:
            output.append('<th><a><span class="sortLabel">%s</span> <span class="splSort%s"></span></a></th>'
                % (cgi.escape(field), field != sortField and "None" or sortDir))
        output.append('<th>Actions</th>')
        output.append('</tr>')

        # generate data
        for i, result in enumerate(dataset):
            isDir = str(result.get('Type')) == 'dir'
            location = str(result.get('Location'))
            output.append('<tr data-dir="%s" ' % isDir)
            output.append('data-Location="%s">' % location.replace('"', '&quot;'))
            for field in fieldNames:
                output.append('<td')
                if mark_interactive and (field != "NULL" and field != "OTHER"):
                    output.append(' class="d"')
                fieldValues = result.get(field, None)
                # render field values; multi-value as a list
                # cap display count to prevent blowout
                if fieldValues:
                    output.append(' field="%s">' % cgi.escape(field))
                    renderedValues = [cgi.escape(x.value) for x in fieldValues[:MAX_MULTI_VALUE_COUNT]]
                    if len(fieldValues) > MAX_MULTI_VALUE_COUNT:
                        clipCount = len(fieldValues) - MAX_MULTI_VALUE_COUNT
                        renderedValues.append(_('[and %d more values]') % clipCount)
                    # when we have multi-valued fields, wrap each value in its own div element
                    if len(renderedValues) > 1:
                        multiValueStr = [MULTI_VALUE_WRAPPER % x for x in renderedValues]
                        output.append("".join(multiValueStr))
                    # however, for single values the extra div is unwanted.
                    else:
                        if field == "Type":
                            output.append('<img src="' + util.make_url('/static/app/HadoopConnect/images/%s_icon.png' % fieldValues[0]) + '"/>')
                        else:
                            output.append("".join(renderedValues))
                    output.append('</td>')
                else:
                    output.append('></td>')

            input_type = 'hdfs' if location.startswith('hdfs://') else 'monitor'
            path = re.sub(r'^\w+://', '', location)
            get_args = {'ns': client_app, 'action': 'edit', 'def.name': path}
            if input_type == 'monitor':
                get_args['preflight'] = 'preview'  # skip preview
            indexHDFSManager = util.make_url(['manager', client_app, 'data', 'inputs', input_type, '_new'], get_args)
            previewURI = util.make_url(['app', 'search', 'flashtimeline'],
                {'q': '| hdfs read %s' % ''.join(('"', location.replace('"', r'\"'), '"'))})

            output.append('<td>')
            actions = []
            if int(cherrypy.config.get("version_label")[0]) >= 5:
                indexLink = []
                indexLink.append('<a target="_new" href="')
                indexLink.append(indexHDFSManager)
                indexLink.append('">Add as data input</a>')
                actions.append(''.join(indexLink))
            if str(result.get('Type')) == 'file':
                previewLink = []
                previewLink.append('<a target="_new" href="')
                previewLink.append(previewURI)
                previewLink.append('">Search</a>')
                actions.append(''.join(previewLink))
            output.append(' | '.join(actions))
            output.append('</td>')
            output.append('</tr>')

        output.append('</table></div>')

        # ---------------------
        # Pass the data on out
        # ---------------------
        output = ''.join(output)
        ...
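Both renderers share the same multi-value display rule: escape each value, cap the list at MAX_MULTI_VALUE_COUNT, append a clip notice, and wrap values in per-value divs only when more than one survives. Below is a minimal standalone sketch of that rule in Python 3, with html.escape standing in for the deprecated cgi.escape (removed in Python 3.8) and plain strings standing in for the job's field-value objects.

import html

MULTI_VALUE_WRAPPER = '<div class="mv">%s</div>'
MAX_MULTI_VALUE_COUNT = 50

def render_field(values):
    # Escape each value, cap the list at the hard limit, note how many
    # values were clipped, and wrap only true multi-value fields in
    # per-value divs; single values get no extra markup.
    rendered = [html.escape(v) for v in values[:MAX_MULTI_VALUE_COUNT]]
    if len(values) > MAX_MULTI_VALUE_COUNT:
        rendered.append('[and %d more values]' % (len(values) - MAX_MULTI_VALUE_COUNT))
    if len(rendered) > 1:
        return ''.join(MULTI_VALUE_WRAPPER % v for v in rendered)
    return ''.join(rendered)

print(render_field(['a<b']))        # a&lt;b
print(render_field(['x', 'y']))     # <div class="mv">x</div><div class="mv">y</div>
print(render_field(['v'] * 75).count('</div>'))  # 51: 50 values plus the clip notice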