diff --git a/ipynb/wltests/sched-evaluation-full.ipynb b/ipynb/wltests/sched-evaluation-full.ipynb
index ec8d5e76ba73f00213e2812657d3625d18892b8b..59487566b2781a42c40f09b86b3bcd999da2e8f1 100644
--- a/ipynb/wltests/sched-evaluation-full.ipynb
+++ b/ipynb/wltests/sched-evaluation-full.ipynb
@@ -14,23 +14,22 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from conf import LisaLogging\n",
-    "LisaLogging.setup()"
+    "from lisa.utils import setup_logging\n",
+    "setup_logging()"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import logging\n",
+    "import pandas as pd\n",
+    "\n",
     "from IPython.display import display\n",
     "\n",
-    "from wa_results_collector import WaResultsCollector\n",
-    "import pandas as pd\n",
+    "from lisa.wa_results_collector import WaResultsCollector\n",
     "\n",
     "%pylab inline"
    ]
@@ -253,9 +252,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# Get Geekbench scores\n",
     "df = collector.results_df\n",
@@ -263,11 +260,11 @@
     "gb_scores_db = df[df.test == 'geekbench']\n",
     "\n",
     "# Group scores\n",
-    "grouped_df = gb_scores_db.groupby(['test', 'tag', 'kernel', 'metric'])\n",
+    "grouped_df = gb_scores_db.groupby(['test', 'kernel', 'metric'])\n",
     "\n",
     "# Get stats for grouped scores\n",
-    "stats_df = pd.DataFrame(grouped_df.describe(percentiles=[.95, .99]))\n",
-    "stats_df = stats_df.reset_index().rename(columns={'level_4': 'stats'})"
+    "stats_df = pd.DataFrame(grouped_df.value.describe(percentiles=[.95, .99])).reset_index()\n",
+    "#stats_df = stats_df.reset_index().rename(columns={'level_4': 'stats'})"
    ]
   },
@@ -280,12 +277,14 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "scrolled": false
+   },
    "outputs": [],
    "source": [
-    "single_score_df = stats_df[stats_df.metric.str.match('Single.*')][['metric', 'kernel', 'stats', 'value']]\n",
-    "single_score_df['metric'] = single_score_df.metric.apply(lambda s : s.replace('Single-Core_', '').replace('_score', ''))\n",
-    "single_score_df = single_score_df.set_index(['metric', 'kernel', 'stats']).unstack()\n",
+    "single_score_df = stats_df[stats_df.metric.str.match(\"Single.*\")]\n",
+    "single_score_df.loc[:, \"metric\"] = single_score_df.metric.apply(lambda s : s.replace('Single-Core_', '').replace('_score', ''))\n",
+    "single_score_df = single_score_df.set_index(['kernel', 'test', 'metric'])\n",
     "logging.info(\"Detailed SINGLE core scores:\")\n",
     "single_score_df"
    ]
@@ -303,10 +302,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "multi_score_df = stats_df[stats_df.metric.str.match('Multi.*')][['metric', 'kernel', 'stats', 'value']]\n",
-    "multi_score_df['metric'] = multi_score_df.metric.apply(lambda s : s.replace('Multi-Core_', '').replace('_score', ''))\n",
-    "multi_score_df = multi_score_df.set_index(['metric', 'kernel', 'stats']).unstack()\n",
-    "logging.info(\"Detailed SINGLE core scores:\")\n",
+    "multi_score_df = stats_df[stats_df.metric.str.match(\"Multi.*\")]\n",
+    "multi_score_df.loc[:, \"metric\"] = multi_score_df.metric.apply(lambda s : s.replace('Multi-Core_', '').replace('_score', ''))\n",
+    "multi_score_df = multi_score_df.set_index(['kernel', 'test', 'metric'])\n",
+    "logging.info(\"Detailed MULTI core scores:\")\n",
     "multi_score_df"
    ]
   },
@@ -350,11 +349,11 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true
+    "scrolled": true
    },
    "outputs": [],
    "source": [
-    "# Get Geekbench scores\n",
+    "# Get PCMark scores\n",
     "df = collector.results_df\n",
     "pm_scores_db = df[df.workload == 'pcmark']\n",
     "\n",
@@ -362,8 +361,7 @@
     "grouped_df = pm_scores_db.groupby(['test', 'tag', 'kernel', 'metric'])\n",
     "\n",
     "# Get stats for grouped scores\n",
-    "stats_df = pd.DataFrame(grouped_df.describe(percentiles=[.95, .99]))\n",
-    "stats_df = stats_df.reset_index().rename(columns={'level_4': 'stats'})"
+    "stats_df = pd.DataFrame(grouped_df.value.describe(percentiles=[.95, .99])).reset_index()"
    ]
   },
   {
@@ -372,9 +370,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "pm_score_df = stats_df[stats_df.metric.str.match('pcmark_.*')][['metric', 'kernel', 'stats', 'value']]\n",
-    "pm_score_df['metric'] = pm_score_df.metric.apply(lambda s : s.replace('pcmark_', ''))\n",
-    "pm_score_df = pm_score_df.set_index(['metric', 'kernel', 'stats']).unstack()\n",
+    "pm_score_df = stats_df[stats_df.metric.str.match('pcmark_.*')]\n",
+    "pm_score_df.loc[:, 'metric'] = pm_score_df.metric.apply(lambda s : s.replace('pcmark_', ''))\n",
+    "pm_score_df = pm_score_df.set_index(['kernel', 'test'])\n",
     "logging.info(\"Detailed scores:\")\n",
     "pm_score_df"
    ]
@@ -418,21 +416,21 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 2",
+   "display_name": "Python 3",
    "language": "python",
-   "name": "python2"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.12"
+   "pygments_lexer": "ipython3",
+   "version": "3.5.2"
   },
   "toc": {
    "colors": {
diff --git a/lisa/wa_results_collector.py b/lisa/wa_results_collector.py
index e98150955384ea93a3e5ba2c88cb75280f37290c..78842ff810e2fea1dccb7af8ca98b26082d8f0e0 100644
--- a/lisa/wa_results_collector.py
+++ b/lisa/wa_results_collector.py
@@ -162,11 +162,11 @@ class WaResultsCollector(Loggable):
         df = df.append(df_list)
 
         kernel_refs = {}
-        if kernel_repo_path:
-            for sha1 in df['kernel_sha1'].unique():
-                ref = find_shortest_symref(kernel_repo_path, sha1)
-                if ref:
-                    kernel_refs[sha1] = ref
+        for sha1 in df['kernel_sha1'].unique():
+            if kernel_repo_path:
+                kernel_refs[sha1] = find_shortest_symref(kernel_repo_path, sha1) or sha1
+            else:
+                kernel_refs[sha1] = sha1
 
         common_prefix = os.path.commonprefix(list(kernel_refs.values()))
         for sha1, ref in kernel_refs.items():
@@ -1083,7 +1083,7 @@ class WaResultsCollector(Loggable):
             return
 
         # Separate plot for each test (e.g. one plot for Jankbench list_view)
-        for (test, inv_id), test_comparisons in df.groupby(('test', 'inv_id')):
+        for (test, inv_id), test_comparisons in df.groupby(['test', 'inv_id']):
            # Vertical size of plot depends on how many metrics we're comparing
            # and how many things (kernels/tags) we're comparing metrics for.
            # a.k.a the total length of the comparisons df.
@@ -1134,7 +1134,7 @@ class WaResultsCollector(Loggable):
            # For each of the things we're comparing we'll plot a bar chart
            # but slightly shifted. That's how we get multiple bars on each
            # y-axis point.
-           bars = ax.barh(bottom=pos + (i * thickness),
+           bars = ax.barh(pos + (i * thickness),
                           width=gdf['diff_pct'], height=thickness,
                           label=group, color=colors[i % len(colors)],
                           align='center')
diff --git a/shell/lisa_shell b/shell/lisa_shell
index cc1a949c2719dfd2ea243fe12b780e2b6a67c147..785ceee5598d026440f1ffb2e55f70d2c7c49cd4 100755
--- a/shell/lisa_shell
+++ b/shell/lisa_shell
@@ -258,9 +258,9 @@ function _lisa-jupyter-start {
 
 IPADDR=
 if [[ -x /sbin/ifconfig ]]; then
-    IPADDR=$(/sbin/ifconfig $NETIF 2>/dev/null | \
-             awk '/inet / {print $2}' | \
-             sed 's/addr://')
+    IPADDR=$(/sbin/ifconfig $NETIF 2>/dev/null | \
+             awk '/inet / {print $2}' | \
+             grep -Eo [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)
 fi
 
 if [[ -z "$IPADDR" && -x /sbin/ip ]]; then
@@ -380,7 +380,6 @@ function lisa-test {
 function lisa-wltest-init {
 export WLTEST_HOME="$LISA_HOME/tools/wltests"
 export WA_USER_DIRECTORY="$LISA_HOME/tools/wa_user_directory"
-lisa-venv-activate
 
 # Check that the environment is properly configured
 if [[ -z "$ANDROID_HOME" ]]; then
@@ -396,7 +395,7 @@ EOF
 
 # Check for required external dependencies
 
-if which realpath &>/dev/null; then
+if ! which realpath &>/dev/null; then
 	cat <