diff --git a/examples/MonteCarloExamples/scenarioAnalyzeMonteCarlo.py b/examples/MonteCarloExamples/scenarioAnalyzeMonteCarlo.py index 0b360a9a72..e6fb0c5007 100644 --- a/examples/MonteCarloExamples/scenarioAnalyzeMonteCarlo.py +++ b/examples/MonteCarloExamples/scenarioAnalyzeMonteCarlo.py @@ -20,7 +20,7 @@ r""" Motivation ---------- -This script is a basic demonstration of a script that can be used to plot Monte Carlo data with +This script is a basic demonstration of a script that can be used to plot Monte Carlo data with bokeh and datashaders. These tools are very efficient to plot large amounts of simulation data that is likely to occur with Monte Carlo sensitivity analysis studies. For example, running this script will create an HTML interactive view of the simulation data. Instead of seeing a fixed resolution, the user can @@ -80,7 +80,7 @@ Read all three steps before advancing. -The next steps outline how to run this script. +The next steps outline how to run this script. 1. This script can only be run once there exists data produced by the ``scenario_AttFeedbackMC.py`` script. 
@@ -102,19 +102,25 @@ import inspect import os -FOUND_DATESHADER = True +import holoviews as hv + + +FOUND_DATASHADER = True try: - from Basilisk.utilities.datashader_utilities import DS_Plot, curve_per_df_component, pull_and_format_df + from Basilisk.utilities.datashader_utilities import DS_Plot, curve_per_df_column, pull_and_format_df from Basilisk.utilities.MonteCarlo.AnalysisBaseClass import mcAnalysisBaseClass from bokeh.palettes import Blues9, Reds9, Greens9, \ Blues3, Reds3, Greens3, Oranges3, RdYlBu9 + from bokeh.server.server import Server + from bokeh.application import Application + from bokeh.application.handlers.function import FunctionHandler + except: print("Wasn't able to include the datashader_utilities.") - FOUND_DATESHADER = False + FOUND_DATASHADER = False import Basilisk.utilities.macros as macros - filename = inspect.getframeinfo(inspect.currentframe()).filename fileNameString = os.path.basename(os.path.splitext(__file__)[0]) path = os.path.dirname(os.path.abspath(filename)) @@ -122,6 +128,7 @@ bskPath = __path__[0] + def plotSuite(dataDir): """ This is the function to populate with all of the plots to be generated using datashaders and bokeh. 
@@ -139,20 +146,23 @@ def plotSuite(dataDir): sigmaPlot = DS_Plot(sigma_BR, title="Attitude Error", xAxisLabel='time [s]', yAxisLabel='Sigma_BR', macro_x=macros.NANO2SEC, - labels = ['b1', 'b2', 'b3'], cmap=RdYlBu9, - plotFcn=curve_per_df_component) + labels=['b1', 'b2', 'b3'], cmap=RdYlBu9, + plotObjType=hv.Points, + plotFcn=curve_per_df_column) plotList.append(sigmaPlot) - sigma_BR = pull_and_format_df(dataDir + "attGuidMsg.omega_BR_B.data", 3) - sigmaPlot = DS_Plot(sigma_BR, title="Attitude Rate Error", - xAxisLabel='time [s]', yAxisLabel='omega_BR_B', - macro_x=macros.NANO2SEC, macro_y=macros.R2D, - labels = ['b1', 'b2', 'b3'], cmap=RdYlBu9, - plotFcn=curve_per_df_component) - plotList.append(sigmaPlot) + # sigma_BR = pull_and_format_df(dataDir + "attGuidMsg.omega_BR_B.data", 3) + # sigmaPlot = DS_Plot(sigma_BR, title="Attitude Rate Error", + # xAxisLabel='time [s]', yAxisLabel='omega_BR_B', + # macro_x=macros.NANO2SEC, macro_y=macros.R2D, + # labels=['b1', 'b2', 'b3'], cmap=RdYlBu9, + # plotObjType=hv.Points, + # plotFcn=curve_per_df_column) + # plotList.append(sigmaPlot) return plotList + def run(show_plots): """ **This script is meant to be configured based on the user's needs. It can be configured using the following @@ -161,7 +171,7 @@ def run(show_plots): First, set ``show_all_data = True`` to get a broad view of the data and find a time window to investigate closer. Once the data is characterized, the user can set ``show_extreme_data = True`` to look at specific run cases - within the window. + within the window. Finally, the user can set ``show_optional_data = True`` to look at any extra data to determine why the extrema cases exist. 
@@ -171,7 +181,7 @@ def run(show_plots): :param optional_plots: plots additional user-defined plots """ - if not FOUND_DATESHADER: + if not FOUND_DATASHADER: return show_all_data = True @@ -179,8 +189,7 @@ def run(show_plots): optional_plots = False plotList = [] - analysis = mcAnalysisBaseClass() - analysis.dataDir = path + "/scenario_AttFeedbackMC/" + analysis = mcAnalysisBaseClass(path + "/scenario_AttFeedbackMC/rerun/") # save_as_static: save off static .html files of the plots generated into the staticDir directory. # The staticDir will be created inside the dataDir folder. @@ -192,13 +201,13 @@ def run(show_plots): plotList.extend(plotSuite(analysis.dataDir)) if show_extreme_data: - analysis.variableName = "attGuidMsg.omega_BR_B" + analysis.variableName = "attGuidMsg.sigma_BR" analysis.variableDim = 1 - extremaRunNumbers = analysis.getExtremaRunIndices(numExtrema=1, window=[500 * 1E9, 550 * 1E9]) + extrema_run_numbers = analysis.getExtremaRunIndices(numExtrema=10, window=[1e9, 2e9]) - analysis.extractSubsetOfRuns(runIdx=extremaRunNumbers) - plotList.extend(plotSuite(analysis.dataDir + "/subset")) + analysis.extractSubsetOfRuns(runIdx=extrema_run_numbers) + plotList.extend(plotSuite(analysis.dataDir + "subset/")) if optional_plots: # nominalRuns = analysis.getNominalRunIndices(50) @@ -207,22 +216,25 @@ def run(show_plots): shadowFactor = pull_and_format_df(analysis.dataDir + "/eclipse_data_0.shadowFactor.data", 1) shadowFactor = shadowFactor.dropna(axis=1) shadowFactorPlot = DS_Plot(shadowFactor, title="Optional Plots: Eclipse", - xAxisLabel='time[s]', yAxisLabel='Eclipse Factor', - macro_x=macros.NANO2SEC, macro_y=macros.R2D, - cmap=RdYlBu9, - plotFcn=curve_per_df_component) + xAxisLabel='time[s]', yAxisLabel='Eclipse Factor', + macro_x=macros.NANO2SEC, macro_y=macros.R2D, + cmap=RdYlBu9, + plotFcn=curve_per_df_column) # plotList.extend([statPlots]) plotList.extend([shadowFactorPlot]) + + + analysis.renderPlots(plotList) # The following must be commented out 
before this script can run. It is provided here # to ensure that the sphinx documentation generation process does not run this script # automatically. -if __name__ == "__main__": - run(False) +#if __name__ == "__main__": +# run(False) # uncomment the following line to run this script. -# run(False) \ No newline at end of file +run(False) diff --git a/examples/MonteCarloExamples/scenarioRerunMonteCarlo.py b/examples/MonteCarloExamples/scenarioRerunMonteCarlo.py index 184b32568c..e8eb0d303b 100644 --- a/examples/MonteCarloExamples/scenarioRerunMonteCarlo.py +++ b/examples/MonteCarloExamples/scenarioRerunMonteCarlo.py @@ -60,8 +60,8 @@ def run(time=None): scenarioName = "scenario_AttFeedback" monteCarlo = Controller() - monteCarlo.numProcess = 3 # Specify number of processes to spawn - runsList = [1] # Specify the run numbers to be rerun + monteCarlo.numProcess = 10 # Specify number of processes to spawn + runsList = [678] # Specify the run numbers to be rerun # # # Generic initialization @@ -88,7 +88,7 @@ def run(time=None): # Step 4: Add any additional retention policies desired retentionPolicy = RetentionPolicy() retentionPolicy.logRate = int(2E9) - retentionPolicy.addMessageLog("attGuidMsg", ["sigma_BR"]) + retentionPolicy.addMessageLog("attGuidMsg", ["sigma_BR"]) monteCarlo.addRetentionPolicy(retentionPolicy) @@ -97,6 +97,6 @@ def run(time=None): + if __name__ == "__main__": run() - diff --git a/src/utilities/MonteCarlo/AnalysisBaseClass.py b/src/utilities/MonteCarlo/AnalysisBaseClass.py index d6ca3d2882..a886232d40 100644 --- a/src/utilities/MonteCarlo/AnalysisBaseClass.py +++ b/src/utilities/MonteCarlo/AnalysisBaseClass.py @@ -5,18 +5,26 @@ import numpy as np import pandas as pd from Basilisk.utilities import macros +from bokeh.layouts import column +from bokeh.plotting import figure, show +import panel as pn +import numpy as np +from tornado.ioloop import IOLoop try: import holoviews as hv + import datashader as ds from Basilisk.utilities.datashader_utilities 
import DS_Plot, curve_per_df_component + from holoviews.operation.datashader import datashade, dynspread, spread except: pass + class mcAnalysisBaseClass: - def __init__(self): + def __init__(self, data_dir=""): self.variableName = "" self.variableDim = 0 - self.dataDir = "" + self.dataDir = data_dir self.numExtrema = 0 self.extremaRuns = [] self.timeWindow = [] @@ -66,17 +74,15 @@ def getExtremaRunIndices(self, numExtrema, window): times = self.data.index.tolist() # Find the closest indices to the time window requested - indStart = min(range(len(times)), key=lambda i: abs(times[i] - window[0])) - indEnd = min(range(len(times)), key=lambda i: abs(times[i] - window[1])) - self.timeWindow = [indStart, indEnd] + ind_start = min(range(len(times)), key=lambda i: abs(times[i] - window[0])) + ind_end = min(range(len(times)), key=lambda i: abs(times[i] - window[1])) + self.timeWindow = [times[ind_start], times[ind_end]] # Find outliers based on largest deviation off of the mean - self.mean = self.data.mean(axis=1, level=1) - self.diff = self.data.subtract(self.mean) - self.diff = self.diff.abs() - self.diff = self.diff.iloc[indStart:indEnd].max(axis=0) - self.extremaRuns = self.diff.nlargest(numExtrema).index._codes[0] - print("Extreme runs are ", list(dict.fromkeys(self.extremaRuns.tolist()))) + mean = self.data.mean(axis=1) + diff = self.data.abs().sub(mean, axis=0) + self.extremaRuns = diff.transpose().nlargest(numExtrema, self.timeWindow).index + print("Extrema runs are ", list(dict.fromkeys(self.extremaRuns.tolist()))) return self.extremaRuns def generateStatCurves(self): @@ -121,27 +127,27 @@ def generateStatPlots(self): varIdxList = range(self.variableDim) varIdxListStr = str(varIdxList) - meanRun.columns = pd.MultiIndex.from_product([['mean'], [0,1,2]], names=["stats", "varIdx"]) - medianRun.columns = pd.MultiIndex.from_product([['median'], [0,1,2]], names=["stats", "varIdx"]) - stdRun.columns = pd.MultiIndex.from_product([['std'], [0,1,2]], names=["stats", 
"varIdx"]) + meanRun.columns = pd.MultiIndex.from_product([['mean'], [0, 1, 2]], names=["stats", "varIdx"]) + medianRun.columns = pd.MultiIndex.from_product([['median'], [0, 1, 2]], names=["stats", "varIdx"]) + stdRun.columns = pd.MultiIndex.from_product([['std'], [0, 1, 2]], names=["stats", "varIdx"]) meanRun_plot = DS_Plot(meanRun, title="Mean Plot: " + self.variableName, - xAxisLabel='time[s]', yAxisLabel= self.variableName.split('.')[-1], - macro_x=macros.NANO2SEC, - labels=['1', '2', '3'], - plotFcn=curve_per_df_component) + xAxisLabel='time[s]', yAxisLabel=self.variableName.split('.')[-1], + macro_x=macros.NANO2SEC, + labels=['1', '2', '3'], + plotFcn=curve_per_df_component) medRun_plot = DS_Plot(medianRun, title="Median Plot: " + self.variableName, - xAxisLabel='time[s]', yAxisLabel= self.variableName.split('.')[-1], - macro_x=macros.NANO2SEC, - labels=['1', '2', '3'], - plotFcn=curve_per_df_component) + xAxisLabel='time[s]', yAxisLabel=self.variableName.split('.')[-1], + macro_x=macros.NANO2SEC, + labels=['1', '2', '3'], + plotFcn=curve_per_df_component) stdRun_plot = DS_Plot(stdRun, title="Standard Dev Plot: " + self.variableName, - xAxisLabel='time[s]', yAxisLabel= self.variableName.split('.')[-1], - macro_x=macros.NANO2SEC, - labels=['1', '2', '3'], - plotFcn=curve_per_df_component) + xAxisLabel='time[s]', yAxisLabel=self.variableName.split('.')[-1], + macro_x=macros.NANO2SEC, + labels=['1', '2', '3'], + plotFcn=curve_per_df_component) statRun_plots = [] statRun_plots.append(meanRun_plot) @@ -160,7 +166,11 @@ def extractSubsetOfRuns(self, runIdx): """ idx = pd.IndexSlice baseDir = self.dataDir + new_list = [] + for run in runIdx: + new_list.append(run[0]) + runIdx = new_list; # check if a subset directory exists, and if it already contains all runIdx requested if not os.path.exists(baseDir + "/subset/"): os.mkdir(baseDir + "/subset/") @@ -196,7 +206,7 @@ def extractSubsetOfRuns(self, runIdx): pd.to_pickle(dfSubSet, baseDir + "/subset/" + varName[-1]) 
print("Finished Populating Subset Directory") - def renderPlots(self, plotList): + def renderPlots(self, plotList, cols=2): """ Render all plots in plotList and print information about time taken, percent complete, which plot, etc. @@ -204,28 +214,28 @@ def renderPlots(self, plotList): :return: nothing. """ hv.extension('bokeh') - renderer = hv.renderer('bokeh').instance(mode='server') + #renderer = hv.renderer('bokeh') + #renderer = hv.renderer.instance(mode='server') if self.save_as_static: - print("Note: You requested to save static plots. This means no interactive python session will be generated.") + print( + "Note: You requested to save static plots. This means no interactive python session will be generated.") print("Beginning the plotting") if not os.path.exists(self.dataDir + self.staticDir): os.mkdir(self.dataDir + self.staticDir) + figures = [] for i in range(len(plotList)): startTime = time.time() - image, title = plotList[i].generateImage() - try: - if self.save_as_static: - # Save .html files of each of the plots into the static directory - hv.save(image, self.dataDir + self.staticDir + "/" + title + ".html") - else: - renderer.server_doc(image) - # Print information about the rendering process - print("LOADED: " + title +"\t\t\t" + - "Percent Complete: " + str(round((i + 1) / len(plotList) * 100, 2)) + "% \t\t\t" - "Time Elapsed: " + str( round(time.time() - startTime)) + " [s]") - except Exception as e: - print("Couldn't Plot " + title) - print(e) + fig, title = plotList[i].generateOverlay() + figures.append(fig) + print("LOADED: " + title + "\t\t\t" + + "Percent Complete: " + str(round((i + 1) / len(plotList) * 100, 2)) + "% \t\t\t" + "Time Elapsed: " + str( + round(time.time() - startTime)) + " [s]") + + layout = hv.Layout(figures).cols(cols) + + + return pn.panel(layout).servable() diff --git a/src/utilities/MonteCarlo/AnalysisPlottingTools.py b/src/utilities/MonteCarlo/AnalysisPlottingTools.py new file mode 100644 index 0000000000..d91fefa5a5 
--- /dev/null +++ b/src/utilities/MonteCarlo/AnalysisPlottingTools.py @@ -0,0 +1,69 @@ +import holoviews as hv +from AnalysisBaseClass import mcAnalysisBaseClass +from Basilisk.utilities.datashader_utilities import DS_Plot, curve_per_df_column +from Basilisk.utilities import macros +from datashader.colors import Sets1to3 + + +class MCAnalysisPlottingTools: + """ + The main data structure in this class being manipulated is a dictionary of DS_Plots + """ + def __init__(self, data_dir="", att_guid_name="attGuidMsg"): + # key + self.variables = list[str]() + self.plots = dict[str, DS_Plot]() + self.extremaPlots = dict[str, DS_Plot]() + self.analysis = mcAnalysisBaseClass(data_dir) + self.attGuidName = att_guid_name + + def render_plots(self): + """ + This function should take all the collected mcAnalysisBaseClass objects, plot them, + organize them into a layout and return pn.panel(layout).servable() + similar to how AnalysisBaseClass renderPlots does it + :return: + """ + figures = [] + + for variable in self.variables: + figures.append(self.plots[variable].generateOverlay()) + + if self.extremaPlots[variable] is not None: + figures.append(self.extremaPlots[variable].generateOverlay()) + + pass + + def add_plot(self, variable_name, title="", x_axis_label="x", y_axis_label="", + macro_x=macros.NANO2SEC, macro_y=1.0, labels=[], cmap=Sets1to3): + """ + This function should take in the necessary information for a DS_Plot object + along with a path to a data directory to add a plot to the list of plots + + :return: + """ + data = self.analysis.pull_and_format_df(self.attGuidName + "." 
+ variable_name) + + if title == "": + title = variable_name + + if y_axis_label == "": + y_axis_label = variable_name + + plot = DS_Plot(data, title=title, xAxisLabel=x_axis_label, yAxisLabel=y_axis_label, plotObjType=hv.Points, + labels=labels, macro_x=macro_x, macro_y=macro_y, cmap=cmap, plotFcn=curve_per_df_column) + + self.plots[variable_name] = plot + self.variables.append(variable_name) + + def add_extrema_plot(self, variable_name): + """ + This function should call getExtremaRunIndices and extractSubsetOfRuns to add a plot to the list + labeled as an extrema plot + This graph should directly display the curves without rastering them so that hover tools still work + :return: + """ + + + #need to change window + extreme_run_numbers = self.analysis.getExtremaRunIndices(numExtrema=10, window=[1e9, 2e9]) diff --git a/src/utilities/datashader_utilities.py b/src/utilities/datashader_utilities.py index 7f5f9dc8c9..286ddb567f 100644 --- a/src/utilities/datashader_utilities.py +++ b/src/utilities/datashader_utilities.py @@ -1,25 +1,32 @@ import warnings - +import time +import holoviews.operation.datashader import numpy as np +from colorcet import glasbey with warnings.catch_warnings(): warnings.simplefilter("ignore", category=DeprecationWarning) import pandas as pd import datashader as ds import holoviews as hv - from holoviews.operation.datashader import datashade, dynspread + from holoviews.operation.datashader import datashade, dynspread, spread from holoviews.streams import RangeXY from datashader.colors import Sets1to3 + from bokeh.models import HoverTool + from bokeh.plotting import figure from Basilisk.utilities import macros def pull_and_format_df(path, varIdxLen): + startTime = time.time() df = pd.read_pickle(path) if len(np.unique(df.columns.codes[1])) is not varIdxLen: print("Warning: " + path + " not formatted correctly!") newMultIndex = pd.MultiIndex.from_product([df.columns.codes[0], list(range(varIdxLen))], names=['runNum', 'varIdx']) - indices = 
pd.Index([0,1]) # Need multiple rows for curves + indices = pd.Index([0, 1]) # Need multiple rows for curves df = df.reindex(columns=newMultIndex, index=indices) + + print("Time Elapsed for pull_and_format_df: " + str( round(time.time() - startTime)) + " [s]") return df @@ -31,7 +38,7 @@ def curve_per_df_component(df): :return: """ idx = pd.IndexSlice - df = df.interpolate(method = "linear") + df = df.interpolate(method="linear") df_list = [] for i in np.unique(df.columns.codes[1]): # Select all of the component @@ -50,6 +57,33 @@ def curve_per_df_component(df): return df_list +def curve_per_run(df): + + startTime = time.time() + idx = pd.IndexSlice + print("idx:", idx) + df = df.interpolate(method="linear") + df_list = [] + print("columns:\n", df.columns) + for i in np.unique(df.columns.codes[1]): + print("i:", i) + + # Select all of the component + varIdx_df = df.loc[idx[:], idx[:, i]] + + # Inject NaNs at the end of the run so the curves don't wrap from t_f to t_0 + varIdx_df = pd.concat([varIdx_df, pd.DataFrame([np.nan] * varIdx_df.shape[1], index=varIdx_df.columns).T]) + + # Flatten values by column order + times = np.tile(varIdx_df.index, len(varIdx_df.columns.codes[0])) # Repeat time by number of runs + varIdx_flat = varIdx_df.values.flatten('F') + + # Generate a curve for each component + curve_df = pd.DataFrame(np.transpose([times, varIdx_flat]).tolist(), columns=['x', 'y']) + df_list.append(curve_df) + + print("Time Elapsed for curve_per_run: " + str( round(time.time() - startTime)) + " [s]") + return df_list def curve_per_df_column(df): """ @@ -58,31 +92,34 @@ def curve_per_df_column(df): """ idx = pd.IndexSlice df_list = [] - for index in range(len(df.columns)): + + for index in df.columns.tolist(): try: - i = df.columns.codes[0][index] # Multi-Index level=0 index - j = df.columns.codes[1][index] # Multi-Index level=1 index + i = df.columns.codes[0][index] # Multi-Index level=0 index + j = df.columns.codes[1][index] # Multi-Index level=1 index # Grab 
the desired x and y data - xData = df.index.values # time [ns] - yData = df.loc[idx[:], idx[i, j]].values # variable data - runNum = np.repeat(i, len(xData)) + x_data = df.index.values # time [ns] + y_data = df.loc[idx[:], idx[i, j]].values # variable data + print("try", len(x_data)) + run_num = np.repeat(i, len(x_data)) except: # Grab the desired x and y data - xData = df.index.values # time [ns] - yData = df.loc[idx[:], idx[index]].values # variable data - runNum = np.repeat(index, len(xData)) + x_data = df.index.values # time [ns] + y_data = df.loc[idx[:], idx[index]].values # variable data + run_num = np.repeat(index[0], len(x_data)) # Convert to two columns - plotData = pd.DataFrame(np.transpose([xData, yData]).tolist(), columns=['x', 'y'])#, runNum]).tolist() - df_list.append(plotData) + plot_data = (pd.DataFrame(np.transpose([x_data, y_data, run_num]).tolist(), columns=['x', 'y', 'run']) + .values.tolist()) + df_list.append(plot_data) return df_list def transform_dataframe(df_in, transforming_function): """ Transforms the data in 'df_in' using the function specified in 'transforming_function'. - E.g. if the data in 'df_in' are position vectors rvec and the 'transforming_function' is np.linalg.norm(rvec) + E.g. if the data in 'df_in' are position vectors rvec and the 'transforming_function' is np.linalg.norm(rvec) then a dataframe with the norm of each position vector is returned. 
""" num_runs = df_in.columns.levshape[0] @@ -149,12 +186,12 @@ class DS_Plot(): ''' def __init__(self, data, title='', - yAxisLabel='', xAxisLabel='time [ns]', - macro_y=1.0, macro_x=macros.NANO2SEC, - cmap=Sets1to3, - plotObjType=hv.Curve, - labels=[], - plotFcn=curve_per_df_component): + yAxisLabel='', xAxisLabel='time [ns]', + macro_y=1.0, macro_x=macros.NANO2SEC, + cmap=Sets1to3, + plotObjType=hv.Curve, + labels=[], + plotFcn=curve_per_df_column): if type(data) is not list: self.data = [data] else: @@ -170,7 +207,6 @@ def __init__(self, data, title='', self.labels = labels self.plotFcn = plotFcn - def generateCurves(self): ''' Generate hv.Curve or hv.Points from the provided dataframe(s) @@ -178,6 +214,8 @@ def generateCurves(self): Populates a dictionary with a unique identifier for each curve for curve coloring purposes :return: dict of hv.Curve or hv.Point objects ''' + + startTime = time.time() count = 0 curves = [] missingData = [] @@ -185,8 +223,8 @@ def generateCurves(self): self.max = self.data[0].values.max() for i in range(len(self.data)): - if self.min > self.data[0].values.min() : self.min = self.data[0].values.min() - if self.max < self.data[0].values.max() : self.max = self.data[0].values.max() + if self.min > self.data[0].values.min(): self.min = self.data[0].values.min() + if self.max < self.data[0].values.max(): self.max = self.data[0].values.max() self.data[i] = self.data[i] * self.macro_y self.data[i].index = self.data[i].index * self.macro_x @@ -195,7 +233,7 @@ def generateCurves(self): # Customize the individual component curves, points, other for curve_df in curveList: - curve = self.plotObjType(curve_df)#.opts(framewise=True) + curve = self.plotObjType(curve_df, label=f'Curve {count}').opts(tools=["hover"]) #.opts(framewise=True) curves.append(curve) count += 1 @@ -203,9 +241,9 @@ def generateCurves(self): missingData.append(True) # Label each curve with a unique identifier curves = {i: curves[i] for i in range(len(curves))} + print("Time 
Elapsed for generate curves: " + str( round(time.time() - startTime)) + " [s]") return curves, missingData - def generateImage(self): ''' Generate the image to be sent to the bokeh server. This includes @@ -217,31 +255,50 @@ def generateImage(self): hv.extension('bokeh') # Overlay these curves curves, missingData = self.generateCurves() - overlay = hv.NdOverlay(curves, kdims='k')#.opts(framewise=True) + overlay = hv.NdOverlay(curves, kdims='k') #.opts(framewise=True) # Rasterize the plot using datashade() if np.sum(missingData) == len(self.data): image = hv.Text(0.5, 0.5, "All Data Missing") else: + color_key = glasbey[:1000] if self.min == self.max and self.min != np.nan: - y_range = (self.min-0.1, self.max+0.1) - image = dynspread(datashade(overlay, dynamic=True, streams=[RangeXY], - aggregator=ds.count_cat('k'), color_key=self.cmap, + y_range = (self.min - 0.1, self.max + 0.1) + image = dynspread(datashade(overlay, dynamic=True, streams=[RangeXY], color_key=color_key, + aggregator=ds.count_cat('k'), y_range=y_range)).opts(framewise=True) else: - image = dynspread(datashade(overlay, dynamic=True, streams=[RangeXY], - aggregator=ds.count_cat('k'), color_key=self.cmap + image = dynspread(datashade(overlay, dynamic=True, streams=[RangeXY], color_key=color_key, + aggregator=ds.count_cat('k'), )).opts(framewise=True) image.opts(width=960, height=540) - image.opts(tools=['hover']) image.opts(padding=0.05) image.opts(title=self.title, xlabel=self.xAxisLabel, ylabel=self.yAxisLabel) if not self.labels == []: color_key = [(name, color) for name, color in zip(self.labels, self.cmap)] legend = hv.NdOverlay({n: hv.Points([np.nan, np.nan], label=str(n)).opts(color=c) for n, c in color_key}) - image = image*legend + image = image * legend + + hover = holoviews.operation.datashader.inspect(image) - return image, self.title + return hover, self.title + def generateOverlay(self): + + hv.extension('bokeh') + # Overlay these curves + curves, missing_data = self.generateCurves() + 
startTime = time.time() + overlay = hv.NdOverlay(curves, kdims=['run']) #.opts(framewise=True) + raster = hv.operation.datashader.rasterize(overlay).opts(width=800) + highlight = hv.operation.datashader.inspect(raster) + hover = HoverTool(tooltips=[("run", "@run")], point_policy="follow_mouse") + #overlay.opts(tools=[hover]) + layers = raster * highlight.opts(tools=[hover]) + + layers.opts(title=self.title, xlabel=self.xAxisLabel, ylabel=self.yAxisLabel) + print("Time Elapsed for generate overlay: " + str( round(time.time() - startTime)) + " [s]") + + return layers, self.title diff --git a/src/utilities/fswSetupRW.py b/src/utilities/fswSetupRW.py index 1092d49e85..56ff681359 100755 --- a/src/utilities/fswSetupRW.py +++ b/src/utilities/fswSetupRW.py @@ -27,7 +27,7 @@ def create( gsHat_B, Js, - uMax = numpy.NaN + uMax = numpy.nan ): """ Create a FSW RW object