5 changes: 5 additions & 0 deletions src/docs/tutorial.rst
@@ -342,6 +342,11 @@ output:
:language: c++
:linenos:

Nanobench allows you to specify additional context information, which can be accessed in render templates via ``{{context(name)}}``, where ``name`` is a variable set with ``Bench::context()``.

.. literalinclude:: ../test/tutorial_context.cpp
:language: c++
:linenos:
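
In short, a context variable is set on the benchmark and then referenced from a custom render template. The following condensed sketch restates the example above; the variable name ``compiler`` and its value are purely illustrative:

.. code-block:: c++

    ankerl::nanobench::Bench bench;
    bench.context("compiler", "gcc").run("some benchmark", [] { /* code to benchmark */ });
    bench.render("{{#result}}{{name}}: {{context(compiler)}}\n{{/result}}", std::cout);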

.. _tutorial-template-csv:

38 changes: 38 additions & 0 deletions src/include/nanobench.h
@@ -43,6 +43,7 @@
#include <cstring> // memcpy
#include <iosfwd> // for std::ostream* custom output target in Config
#include <string> // all names
#include <unordered_map> // holds context information of results
#include <vector> // holds all results

#define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x()
@@ -177,6 +178,8 @@ class BigO;
*
* * `{{relative}}` True or false, depending on the setting you have used. See Bench::relative().
*
* * `{{context(variableName)}}` The value of the context variable `variableName`. See Bench::context().
*
* Apart from these tags, it is also possible to use some mathematical operations on the measurement data. The operations
* are of the form `{{command(name)}}`. Currently `name` can be one of `elapsed`, `iterations`. If performance counters
* are available (currently only on current Linux systems), you also have `pagefaults`, `cpucycles`,
@@ -395,6 +398,7 @@ struct Config {
std::string mTimeUnitName = "ns";
bool mShowPerformanceCounters = true;
bool mIsRelative = false;
std::unordered_map<std::string, std::string> mContext{};

Config();
~Config();
@@ -442,6 +446,7 @@ class Result {
ANKERL_NANOBENCH(NODISCARD) double sumProduct(Measure m1, Measure m2) const noexcept;
ANKERL_NANOBENCH(NODISCARD) double minimum(Measure m) const noexcept;
ANKERL_NANOBENCH(NODISCARD) double maximum(Measure m) const noexcept;
ANKERL_NANOBENCH(NODISCARD) std::string const& context(std::string const& variableName) const noexcept;

ANKERL_NANOBENCH(NODISCARD) bool has(Measure m) const noexcept;
ANKERL_NANOBENCH(NODISCARD) double get(size_t idx, Measure m) const;
@@ -674,6 +679,20 @@ class Bench {
Bench& name(std::string const& benchmarkName);
ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept;

/**
* @brief Set context information.
*
* The information can be accessed using custom render templates via `{{context(variableName)}}`.
* Rendering a variable that has not been set results in empty output for that variable.
* Context variables are not included in the default markdown table.
*
* @see render()
*
* @param variableName The name of the context variable.
* @param variableValue The value of the context variable.
*/
Bench& context(std::string const& variableName, std::string const& variableValue);
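
// Illustrative usage of context() (a sketch mirroring the tutorial_context.cpp test added in this
// PR; kept as a comment so it does not alter the interface):
//
//   ankerl::nanobench::Bench bench;
//   bench.title("Addition").context("scalar", "f32").run("+=", [] { /* benchmark body */ });
//   bench.render("{{#result}}{{name}};{{context(scalar)}}\n{{/result}}", std::cout);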

/**
* @brief Sets the batch size.
*
@@ -1600,6 +1619,10 @@ static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostr
std::vector<std::string> matchResult;
if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) {
if (matchResult.size() == 2) {
if (matchResult[0] == "context") {
return out << r.context(matchResult[1]);
}

auto m = Result::fromString(matchResult[1]);
if (m == Result::Measure::_size) {
return out << 0.0;
@@ -2991,6 +3014,16 @@ double Result::maximum(Measure m) const noexcept {
return *std::max_element(data.begin(), data.end());
}

std::string const& Result::context(std::string const& variableName) const noexcept {
// Look up the variable in the stored config by reference; copying the map here would
// leave the returned reference dangling.
auto const& context = mConfig.mContext;
auto search = context.find(variableName);
if (search == context.end()) {
// Unknown variables render as empty output.
static std::string const NOTFOUND{};
return NOTFOUND;
}
return search->second;
}

Result::Measure Result::fromString(std::string const& str) {
if (str == "elapsed") {
return Measure::elapsed;
@@ -3118,6 +3151,11 @@ std::string const& Bench::name() const noexcept {
return mConfig.mBenchmarkName;
}

Bench& Bench::context(std::string const& variableName, std::string const& variableValue) {
mConfig.mContext[variableName] = variableValue;
return *this;
}

// Number of epochs to evaluate. The reported result will be the median of evaluation of each epoch.
Bench& Bench::epochs(size_t numEpochs) noexcept {
mConfig.mNumEpochs = numEpochs;
1 change: 1 addition & 0 deletions src/test/CMakeLists.txt
@@ -19,6 +19,7 @@ target_sources_local(nb PRIVATE
example_shuffle.cpp
tutorial_complexity_set.cpp
tutorial_complexity_sort.cpp
tutorial_context.cpp
tutorial_fast_v1.cpp
tutorial_fast_v2.cpp
tutorial_fluctuating_v1.cpp
50 changes: 50 additions & 0 deletions src/test/tutorial_context.cpp
@@ -0,0 +1,50 @@
#include <nanobench.h>
#include <thirdparty/doctest/doctest.h>

#include <iostream>
#include <cmath>

namespace {

template<typename T>
void fma() {
T x(1), y(2), z(3);
z = std::fma(x, y, z);
ankerl::nanobench::doNotOptimizeAway(z);
}

template<typename T>
void plus_eq() {
T x(1), y(2), z(3);
z += x*y;
ankerl::nanobench::doNotOptimizeAway(z);
}

char const* csv() {
return R"DELIM("title";"name";"scalar";"foo";"elapsed";"total"
{{#result}}"{{title}}";"{{name}}";"{{context(scalar)}}";"{{context(foo)}}";{{median(elapsed)}};{{sumProduct(iterations, elapsed)}}
{{/result}})DELIM";
}

} // namespace

TEST_CASE("tutorial_context") {
ankerl::nanobench::Bench bench;
bench.title("Addition").output(nullptr);
bench.run("missing context", [] {
int a = 1, b = 2;
int c = a + b;
ankerl::nanobench::doNotOptimizeAway(c);
});
bench
.context("scalar", "f32")
.context("foo", "bar")
.run("+=", plus_eq<float>)
.run("fma", fma<float>);
bench
.context("scalar", "f64")
.context("foo", "baz")
.run("+=", plus_eq<double>)
.run("fma", fma<double>);
bench.render(csv(), std::cout);
}