Compare commits


16 Commits

Author  SHA1  Message  Date
Niels Lohmann  8c20571136  Merge branch 'release/3.2.0'  2018-08-20 19:36:10 +02:00
Niels Lohmann  183390c10b  Merge branch 'release/3.1.2'  2018-03-14 21:47:50 +01:00
Niels Lohmann  c8ea63a31b  Merge branch 'release/3.1.1'  2018-02-13 19:36:20 +01:00
Niels Lohmann  15acf260ca  Merge branch 'release/3.1.0'  2018-02-02 00:05:55 +01:00
Niels Lohmann  a43347e1f9  Merge branch 'develop'  2017-12-29 20:35:02 +01:00
Niels Lohmann  5681e55da8  Merge branch 'release/3.0.1'  2017-12-29 20:24:20 +01:00
Niels Lohmann  9f81beb5e2  Merge branch 'release/3.0.0'  2017-12-17 11:04:04 +01:00
Niels Lohmann  c0d511ea50  🔀 merge branch 'release/2.1.1'  2017-02-25 16:36:46 +01:00
Niels Lohmann  1e99a0273f  🔀 merge branch 'release/2.1.0'  2017-01-28 18:42:27 +01:00
Niels Lohmann  528bc96d7e  Merge branch 'develop'  2017-01-02 16:41:33 +01:00
Niels Lohmann  1c98ce869c  🔀 merge branch 'release/2.0.10'  2017-01-02 16:37:52 +01:00
Niels Lohmann  6df60b0448  🔀 Merge branch 'release/2.0.9'  2016-12-16 21:34:54 +01:00
Niels Lohmann  272ebdc900  🔖 Merge branch 'release/2.0.8'  2016-12-02 20:07:14 +01:00
Niels  79a9d00e15  Merge branch 'develop'  2016-11-03 18:54:59 +01:00
Niels  a4d13c92ba  Merge branch 'release/2.0.7'  2016-11-02 20:52:24 +01:00
Niels  60bba02cc6  Merge branch 'release/2.0.6'  2016-10-15 16:47:56 +02:00
1,687 changed files with 5,321,273 additions and 64,506 deletions


@@ -1,56 +0,0 @@
version: 2
jobs:
build_stable:
docker:
- image: debian:stretch
steps:
- checkout
- run:
name: Install required tools
command: 'apt-get update && apt-get install -y gcc g++ git cmake'
- run:
name: Run CMake
command: 'mkdir build ; cd build ; cmake .. -DJSON_BuildTests=On'
- run:
name: Compile
command: 'cmake --build build'
- run:
name: Execute test suite
command: 'cd build ; ctest --output-on-failure -j 2'
build_bleeding_edge:
docker:
- image: archlinux
steps:
- checkout
- run:
name: Install required tools
command: 'pacman -Sy --noconfirm base base-devel gcc git cmake'
- run:
name: Run CMake
command: 'mkdir build ; cd build ; cmake .. -DJSON_BuildTests=On'
- run:
name: Compile
command: 'cmake --build build'
- run:
name: Execute test suite
command: 'cd build ; ctest --output-on-failure -j 2'
workflows:
version: 2
build_and_test_all:
jobs:
- build_stable:
filters:
branches:
ignore:
gh-pages
- build_bleeding_edge:
filters:
branches:
ignore:
gh-pages


@@ -1,84 +0,0 @@
#AccessModifierOffset: 2
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
#AlignConsecutiveBitFields: false
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: false
AlignEscapedNewlines: Right
#AlignOperands: AlignAfterOperator
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: false
AllowAllConstructorInitializersOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: Empty
AllowShortCaseLabelsOnASingleLine: false
#AllowShortEnumsOnASingleLine: true
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: Never
AllowShortLambdasOnASingleLine: Empty
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
#BitFieldColonSpacing: Both
BreakBeforeBraces: Custom # or Allman
BraceWrapping:
AfterCaseLabel: true
AfterClass: true
AfterControlStatement: Always
AfterEnum: true
AfterFunction: true
AfterNamespace: false
AfterStruct: true
AfterUnion: true
AfterExternBlock: false
BeforeCatch: true
BeforeElse: true
#BeforeLambdaBody: false
#BeforeWhile: false
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializers: BeforeComma
BreakStringLiterals: false
ColumnLimit: 0
CompactNamespaces: false
ConstructorInitializerIndentWidth: 2
Cpp11BracedListStyle: true
PointerAlignment: Left
FixNamespaceComments: true
IncludeBlocks: Preserve
#IndentCaseBlocks: false
IndentCaseLabels: true
IndentGotoLabels: false
IndentPPDirectives: BeforeHash
IndentWidth: 4
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ReflowComments: false
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterLogicalNot: false
SpaceAfterTemplateKeyword: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceBeforeSquareBrackets: false
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++11
TabWidth: 4
UseTab: Never
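
For illustration, a small C++ fragment formatted according to the options above (Allman-style braces via the BraceWrapping block, 4-space indentation, left-aligned pointers, two spaces before trailing comments) would look roughly like this; the function itself is hypothetical:

    int* find_value(bool enabled)
    {
        if (enabled)
        {
            return nullptr;  // two spaces before the trailing comment
        }
        return nullptr;
    }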


@@ -1,23 +0,0 @@
Checks: '*,
-cppcoreguidelines-avoid-goto,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-macro-usage,
-fuchsia-default-arguments-calls,
-fuchsia-default-arguments-declarations,
-fuchsia-overloaded-operator,
-google-explicit-constructor,
-google-runtime-references,
-hicpp-avoid-goto,
-hicpp-explicit-conversions,
-hicpp-no-array-decay,
-hicpp-uppercase-literal-suffix,
-llvm-header-guard,
-llvm-include-order,
-misc-non-private-member-variables-in-classes,
-modernize-use-trailing-return-type,
-readability-magic-numbers,
-readability-uppercase-literal-suffix'
CheckOptions:
- key: hicpp-special-member-functions.AllowSoleDefaultDtor
value: 1
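
The sole CheckOption above relaxes hicpp-special-member-functions for classes whose only declared special member is a defaulted destructor, a pattern common for polymorphic base classes. A minimal sketch of what the option permits (the class name is illustrative):

    struct base
    {
        virtual ~base() = default;  // sole defaulted destructor: no warning
                                    // with AllowSoleDefaultDtor set to 1
    };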

.github/CODEOWNERS vendored (6 lines changed)

@@ -1,6 +0,0 @@
# JSON for Modern C++ has been originally written by Niels Lohmann.
# Since 2013 over 140 contributors have helped to improve the library.
# This CODEOWNERS file is only to make sure that @nlohmann is requested
# for a code review in case of a pull request.
* @nlohmann


@@ -4,7 +4,7 @@
This project started as a little excuse to exercise some of the cool new C++11 features. Over time, people actually started to use the JSON library (yey!) and started to help improve it by proposing features, finding bugs, or even fixing my mistakes. I am really [thankful](https://github.com/nlohmann/json/blob/master/README.md#thanks) for this and try to keep track of all the helpers.
To make it as easy as possible for you to contribute and for me to keep an overview, here are a few guidelines which should help us avoid all kinds of unnecessary work or disappointment. And of course, this document is subject to discussion, so please [create an issue](https://github.com/nlohmann/json/issues/new/choose) or a pull request if you find a way to improve it!
To make it as easy as possible for you to contribute and for me to keep an overview, here are a few guidelines which should help us avoid all kinds of unnecessary work or disappointment. And of course, this document is subject to discussion, so please [create an issue](https://github.com/nlohmann/json/issues/new) or a pull request if you find a way to improve it!
## Private reports
@@ -12,7 +12,7 @@ Usually, all issues are tracked publicly on [GitHub](https://github.com/nlohmann
## Prerequisites
Please [create an issue](https://github.com/nlohmann/json/issues/new/choose), assuming one does not already exist, and describe your concern. Note you need a [GitHub account](https://github.com/signup/free) for this.
Please [create an issue](https://github.com/nlohmann/json/issues/new), assuming one does not already exist, and describe your concern. Note you need a [GitHub account](https://github.com/signup/free) for this.
## Describe your issue
@@ -22,7 +22,7 @@ Clearly describe the issue:
- If you propose a change or addition, try to give an **example** of how the improved code could look or how to use it.
- If you found a compilation error, please tell us which **compiler** (version and operating system) you used and paste the (relevant part of the) error messages into the ticket.
Please stick to the provided issue templates ([bug report](https://github.com/nlohmann/json/blob/develop/.github/ISSUE_TEMPLATE/Bug_report.md), [feature request](https://github.com/nlohmann/json/blob/develop/.github/ISSUE_TEMPLATE/Feature_request.md), or [question](https://github.com/nlohmann/json/blob/develop/.github/ISSUE_TEMPLATE/question.md)) if possible.
Please stick to the [issue template](https://github.com/nlohmann/json/blob/develop/.github/ISSUE_TEMPLATE.md) if possible.
## Files to change
@@ -32,7 +32,7 @@ To make changes, you need to edit the following files:
1. [`include/nlohmann/*`](https://github.com/nlohmann/json/tree/develop/include/nlohmann) - These files are the sources of the library. Before testing or creating a pull request, execute `make amalgamate` to regenerate `single_include/nlohmann/json.hpp`.
2. [`test/src/unit-*.cpp`](https://github.com/nlohmann/json/tree/develop/test/src) - These files contain the [doctest](https://github.com/onqtam/doctest) unit tests which currently cover [100 %](https://coveralls.io/github/nlohmann/json) of the library's code.
2. [`test/src/unit-*.cpp`](https://github.com/nlohmann/json/tree/develop/test/src) - These files contain the [Catch](https://github.com/philsquared/Catch) unit tests which currently cover [100 %](https://coveralls.io/github/nlohmann/json) of the library's code.
If you add or change a feature, please also add a unit test to this file. The unit tests can be compiled and executed with
@@ -54,9 +54,9 @@ To make changes, you need to edit the following files:
## Please don't
- The C++11 support varies between different **compilers** and versions. Please note the [list of supported compilers](https://github.com/nlohmann/json/blob/master/README.md#supported-compilers). Some compilers like GCC 4.7 (and earlier), Clang 3.3 (and earlier), or Microsoft Visual Studio 13.0 and earlier are known not to work due to missing or incomplete C++11 support. Please refrain from proposing changes that work around these compilers' limitations with `#ifdef`s or other means.
- The C++11 support varies between different **compilers** and versions. Please note the [list of supported compilers](https://github.com/nlohmann/json/blob/master/README.md#supported-compilers). Some compilers like GCC 4.8 (and earlier), Clang 3.3 (and earlier), or Microsoft Visual Studio 13.0 and earlier are known not to work due to missing or incomplete C++11 support. Please refrain from proposing changes that work around these compilers' limitations with `#ifdef`s or other means.
- Specifically, I am aware of compilation problems with **Microsoft Visual Studio** (there even is an [issue label](https://github.com/nlohmann/json/issues?utf8=✓&q=label%3A%22visual+studio%22+) for this kind of bug). I understand that even in 2016, complete C++11 support isn't there yet. But please also understand that I do not want to drop features or uglify the code just to make Microsoft's sub-standard compiler happy. The past has shown that there are ways to express the functionality such that the code compiles with the most recent MSVC - unfortunately, this is not the main objective of the project.
- Please refrain from proposing changes that would **break [JSON](https://json.org) conformance**. If you propose a conformant extension of JSON to be supported by the library, please motivate this extension.
- Please refrain from proposing changes that would **break [JSON](http://json.org) conformance**. If you propose a conformant extension of JSON to be supported by the library, please motivate this extension.
- We shall not extend the library to **support comments**. There is quite some [controversy](https://www.reddit.com/r/programming/comments/4v6chu/why_json_doesnt_support_comments_douglas_crockford/) around this topic, and there were quite some [issues](https://github.com/nlohmann/json/issues/376) on this. We believe that JSON is fine without comments.
- We do not preserve the **insertion order of object elements**. The [JSON standard](https://tools.ietf.org/html/rfc7159.html) defines objects as "an unordered collection of zero or more name/value pairs". To this end, this library does not preserve insertion order of name/value pairs. (In fact, keys will be traversed in alphabetical order as `std::map` with `std::less` is used by default.) Note this behavior conforms to the standard, and we shall not change it to any other order. If you do want to preserve the insertion order, you can specialize the object type with containers like [`tsl::ordered_map`](https://github.com/Tessil/ordered-map) or [`nlohmann::fifo_map`](https://github.com/nlohmann/fifo_map).
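
As a minimal sketch of the guarantee described in the previous item (assuming the default `nlohmann::json` object type, i.e. `std::map` with `std::less`):

    #include <iostream>
    #include <nlohmann/json.hpp>

    int main()
    {
        nlohmann::json j;
        j["zebra"] = 1;
        j["apple"] = 2;
        for (const auto& el : j.items())    // keys come back in alphabetical
        {                                   // order, not insertion order
            std::cout << el.key() << '\n';  // prints "apple", then "zebra"
        }
    }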
@@ -66,6 +66,6 @@ To make changes, you need to edit the following files:
The following areas really need contribution:
- Extending the **continuous integration** toward more exotic compilers such as Android NDK, Intel's Compiler, or the bleeding-edge versions of Clang.
- Extending the **continuous integration** toward more exotic compilers such as Android NDK, Intel's Compiler, or the bleeding-edge versions of GCC or Clang.
- Improving the efficiency of the **JSON parser**. The current parser is implemented as a naive recursive descent parser with hand-coded string handling. More sophisticated approaches like LALR parsers would be really appreciated. That said, parser generators like Bison or ANTLR do not play nice with single-header files -- I really would like to keep the parser inside the `json.hpp` header, and I am not aware of approaches similar to [`re2c`](http://re2c.org) for parsing.
- Extending and updating existing **benchmarks** to include (the most recent version of) this library. Though efficiency is not everything, speed and memory consumption are very important characteristics for C++ developers, so having proper comparisons would be interesting.

.github/FUNDING.yml vendored (2 lines changed)

@@ -1,2 +0,0 @@
github: nlohmann
custom: http://paypal.me/nlohmann


@@ -1,57 +1,19 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'kind: bug'
assignees: ''
---
<!-- Provide a concise summary of the issue in the title above. -->
#### What is the issue you have?
<!-- Provide a detailed introduction to the issue itself, and why you consider it to be a bug. -->
<!-- If possible, be specific and add stack traces, error messages, etc. Avoid vague terms like "crash" or "doesn't work". -->
#### Please describe the steps to reproduce the issue.
<!-- Provide a link to a live example, or an unambiguous set of steps to -->
<!-- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
#### Can you provide a small but working code example?
<!-- Please understand that we cannot analyze and debug large code bases. -->
#### What is the expected behavior?
<!-- Tell us what should happen -->
#### And what is the actual behavior instead?
<!-- Tell us what happens instead. -->
#### Which compiler and operating system are you using?
<!-- Include as many relevant details about the environment you experienced the bug in. -->
<!-- Make sure you use a supported compiler, see https://github.com/nlohmann/json#supported-compilers. -->
- Compiler: ___
- Operating system: ___
#### Which version of the library did you use?
<!-- Please add an `x` to the respective line. -->
- [ ] latest release version 3.9.1
- [ ] other release - please state the version: ___
- [ ] the `develop` branch
#### If you experience a compilation error: can you [compile and run the unit tests](https://github.com/nlohmann/json#execute-unit-tests)?
- [ ] yes
- [ ] no - please copy/paste the error message below
---
name: Bug report
about: Create a report to help us improve
---
- What is the issue you have?
- Please describe the steps to reproduce the issue. Can you provide a small but working code example?
- What is the expected behavior?
- And what is the actual behavior instead?
- Which compiler and operating system are you using? Is it a [supported compiler](https://github.com/nlohmann/json#supported-compilers)?
- Did you use a released version of the library or the version from the `develop` branch?
- If you experience a compilation error: can you [compile and run the unit tests](https://github.com/nlohmann/json#execute-unit-tests)?
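
For reference, a "small but working code example" in the sense of this template is usually a single self-contained file; a hypothetical sketch:

    #include <iostream>
    #include <nlohmann/json.hpp>

    int main()
    {
        // minimal input that triggers the reported behavior (illustrative)
        auto j = nlohmann::json::parse(R"({"value": 42})");
        std::cout << j.dump() << '\n';
    }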


@@ -0,0 +1,9 @@
---
name: Feature request
about: Suggest an idea for this project
---
- Describe the feature in as much detail as possible.
- Include sample usage where appropriate.


@@ -1,5 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Ask a question
url: https://github.com/nlohmann/json/discussions
about: Ask questions and discuss with other community members


@@ -13,7 +13,7 @@ Read the [Contribution Guidelines](https://github.com/nlohmann/json/blob/develop
## Please don't
- The C++11 support varies between different **compilers** and versions. Please note the [list of supported compilers](https://github.com/nlohmann/json/blob/master/README.md#supported-compilers). Some compilers like GCC 4.7 (and earlier), Clang 3.3 (and earlier), or Microsoft Visual Studio 13.0 and earlier are known not to work due to missing or incomplete C++11 support. Please refrain from proposing changes that work around these compilers' limitations with `#ifdef`s or other means.
- The C++11 support varies between different **compilers** and versions. Please note the [list of supported compilers](https://github.com/nlohmann/json/blob/master/README.md#supported-compilers). Some compilers like GCC 4.8 (and earlier), Clang 3.3 (and earlier), or Microsoft Visual Studio 13.0 and earlier are known not to work due to missing or incomplete C++11 support. Please refrain from proposing changes that work around these compilers' limitations with `#ifdef`s or other means.
- Specifically, I am aware of compilation problems with **Microsoft Visual Studio** (there even is an [issue label](https://github.com/nlohmann/json/issues?utf8=✓&q=label%3A%22visual+studio%22+) for this kind of bug). I understand that even in 2016, complete C++11 support isn't there yet. But please also understand that I do not want to drop features or uglify the code just to make Microsoft's sub-standard compiler happy. The past has shown that there are ways to express the functionality such that the code compiles with the most recent MSVC - unfortunately, this is not the main objective of the project.
- Please refrain from proposing changes that would **break [JSON](https://json.org) conformance**. If you propose a conformant extension of JSON to be supported by the library, please motivate this extension.
- Please refrain from proposing changes that would **break [JSON](http://json.org) conformance**. If you propose a conformant extension of JSON to be supported by the library, please motivate this extension.
- Please do not open pull requests that address **multiple issues**.

.github/SECURITY.md vendored (5 lines changed)

@@ -1,5 +0,0 @@
# Security Policy
## Reporting a Vulnerability
Usually, all issues are tracked publicly on [GitHub](https://github.com/nlohmann/json/issues). If you want to make a private report (e.g., for a vulnerability or to attach an example that is not meant to be published), please send an email to <mail@nlohmann.me>. You can use [this key](https://keybase.io/nlohmann/pgp_keys.asc?fingerprint=797167ae41c0a6d9232e48457f3cea63ae251b69) for encryption.


@@ -1,54 +0,0 @@
name: "Code scanning - action"
on:
push:
branches: [develop, ]
pull_request:
# The branches below must be a subset of the branches above
branches: [develop]
schedule:
- cron: '0 19 * * 1'
jobs:
CodeQL-Build:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v2
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2
# If this run was triggered by a pull request event, then checkout
# the head of the pull request instead of the merge commit.
- run: git checkout HEAD^2
if: ${{ github.event_name == 'pull_request' }}
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1


@@ -1,17 +0,0 @@
name: macOS
on: [push, pull_request]
jobs:
build:
runs-on: macos-latest
steps:
- uses: actions/checkout@v1
- name: cmake
run: cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On
- name: build
run: cmake --build build --parallel 10
- name: test
run: cd build ; ctest -j 10 --output-on-failure


@@ -1,64 +0,0 @@
name: Ubuntu
on: [push, pull_request]
jobs:
gcc_build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: install_gcc
run: |
sudo apt update
sudo apt install gcc-10 g++-10
shell: bash
- name: cmake
run: cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On
env:
CC: gcc-10
CXX: g++-10
- name: build
run: cmake --build build --parallel 10
- name: test
run: cd build ; ctest -j 10 --output-on-failure
clang_build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: install_gcc
run: |
sudo apt update
sudo apt install clang-10
shell: bash
- name: cmake
run: cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On
env:
CC: clang-10
CXX: clang++-10
- name: build
run: cmake --build build --parallel 10
- name: test
run: cd build ; ctest -j 10 --output-on-failure
clang_build_cxx20:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: install_gcc
run: |
sudo apt update
sudo apt install clang-10
shell: bash
- name: cmake
run: cmake -S . -B build -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On -DCMAKE_CXX_STANDARD=20 -DCMAKE_CXX_STANDARD_REQUIRED=ON
env:
CC: clang-10
CXX: clang++-10
- name: build
run: cmake --build build --parallel 10
- name: test
run: cd build ; ctest -j 10 --output-on-failure


@@ -1,54 +0,0 @@
name: Windows
on: [push, pull_request]
jobs:
msvc2019:
runs-on: windows-latest
steps:
- uses: actions/checkout@v1
- name: cmake
run: cmake -S . -B build -G "Visual Studio 16 2019" -D CMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On
- name: build
run: cmake --build build --parallel 10
- name: test
run: cd build ; ctest -j 10 -C Debug --exclude-regex "test-unicode" --output-on-failure
clang10:
runs-on: windows-latest
steps:
- uses: actions/checkout@v1
- name: install Clang
run: curl -fsSL -o LLVM10.exe https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/LLVM-10.0.0-win64.exe ; 7z x LLVM10.exe -y -o"C:/Program Files/LLVM"
- name: cmake
run: cmake -S . -B build -DCMAKE_CXX_COMPILER="C:/Program Files/LLVM/bin/clang++.exe" -G"MinGW Makefiles" -DCMAKE_BUILD_TYPE=Debug -DJSON_BuildTests=On
- name: build
run: cmake --build build --parallel 10
- name: test
run: cd build ; ctest -j 10 -C Debug --exclude-regex "test-unicode" --output-on-failure
clang-cl-10-x64:
runs-on: windows-latest
steps:
- uses: actions/checkout@v1
- name: cmake
run: cmake -S . -B build -G "Visual Studio 16 2019" -A x64 -T ClangCL -DJSON_BuildTests=On
- name: build
run: cmake --build build --config Debug --parallel 10
- name: test
run: cd build ; ctest -j 10 -C Debug --exclude-regex "test-unicode" --output-on-failure
clang-cl-10-x86:
runs-on: windows-latest
steps:
- uses: actions/checkout@v1
- name: cmake
run: cmake -S . -B build -G "Visual Studio 16 2019" -A Win32 -T ClangCL -DJSON_BuildTests=On
- name: build
run: cmake --build build --config Debug --parallel 10
- name: test
run: cd build ; ctest -j 10 -C Debug --exclude-regex "test-unicode" --output-on-failure

.gitignore vendored (14 lines changed)

@@ -18,19 +18,7 @@ me.nlohmann.json.docset
benchmarks/files/numbers/*.json
.wsjcpp-logs/*
.wsjcpp/*
.idea
/cmake-build-*
cmake-build-debug
test/test-*
/.vs
doc/mkdocs/venv/
doc/mkdocs/docs/images
doc/mkdocs/docs/examples
doc/mkdocs/site
doc/mkdocs/docs/__pycache__/
doc/xml
/doc/docset/nlohmann_json.docset/


@@ -10,6 +10,17 @@ sudo: required
group: edge
###################
# global settings #
###################
env:
global:
# The next declaration is the encrypted COVERITY_SCAN_TOKEN, created
# via the "travis encrypt" command using the project repo's public key
- secure: "m89SSgE+ASLO38rSKx7MTXK3n5NkP9bIx95jwY71YEiuFzib30PDJ/DifKnXxBjvy/AkCGztErQRk/8ZCvq+4HXozU2knEGnL/RUitvlwbhzfh2D4lmS3BvWBGS3N3NewoPBrRmdcvnT0xjOGXxtZaJ3P74TkB9GBnlz/HmKORA="
################
# build matrix #
################
@@ -32,15 +43,12 @@ matrix:
- os: linux
compiler: clang
env:
- COMPILER=clang++-7
- COMPILER=clang++-5.0
- CMAKE_OPTIONS=-DJSON_Sanitizer=ON
- UBSAN_OPTIONS=print_stacktrace=1,suppressions=$(pwd)/test/src/UBSAN.supp
addons:
apt:
sources: ['ubuntu-toolchain-r-test', 'llvm-toolchain-trusty-7']
packages: ['g++-6', 'clang-7', 'ninja-build']
before_script:
- export PATH=$PATH:/usr/lib/llvm-7/bin
sources: ['ubuntu-toolchain-r-test', 'llvm-toolchain-trusty-5.0']
packages: ['g++-6', 'clang-5.0', 'ninja-build']
# cppcheck
- os: linux
@@ -84,17 +92,16 @@ matrix:
- os: linux
compiler: gcc
dist: bionic
addons:
apt:
sources: ['ubuntu-toolchain-r-test']
packages: ['g++-7', 'ninja-build']
packages: ['g++-4.9', 'ninja-build']
before_script:
- pip install --user cpp-coveralls
after_success:
- coveralls --build-root test --include include/nlohmann --gcov 'gcov-7' --gcov-options '\-lp'
- coveralls --build-root test --include include/nlohmann --gcov 'gcov-4.9' --gcov-options '\-lp'
env:
- COMPILER=g++-7
- COMPILER=g++-4.9
- CMAKE_OPTIONS=-DJSON_Coverage=ON
- MULTIPLE_HEADERS=ON
@@ -118,36 +125,44 @@ matrix:
env:
- SPECIAL=coverity
- COMPILER=clang++-3.6
# The next declaration is the encrypted COVERITY_SCAN_TOKEN, created
# via the "travis encrypt" command using the project repo's public key
- secure: "m89SSgE+ASLO38rSKx7MTXK3n5NkP9bIx95jwY71YEiuFzib30PDJ/DifKnXxBjvy/AkCGztErQRk/8ZCvq+4HXozU2knEGnL/RUitvlwbhzfh2D4lmS3BvWBGS3N3NewoPBrRmdcvnT0xjOGXxtZaJ3P74TkB9GBnlz/HmKORA="
# OSX / Clang
- os: osx
osx_image: xcode10.2
osx_image: xcode6.4
- os: osx
osx_image: xcode11.2
osx_image: xcode7.3
- os: osx
osx_image: xcode12
osx_image: xcode8
- os: osx
osx_image: xcode12
env:
- IMPLICIT_CONVERSIONS=OFF
osx_image: xcode8.1
- os: osx
osx_image: xcode8.2
- os: osx
osx_image: xcode8.3
- os: osx
osx_image: xcode9
- os: osx
osx_image: xcode9.1
- os: osx
osx_image: xcode9.2
- os: osx
osx_image: xcode9.3
- os: osx
osx_image: xcode9.4
# Linux / GCC
- os: linux
compiler: gcc
env: COMPILER=g++-4.8
addons:
apt:
sources: ['ubuntu-toolchain-r-test']
packages: ['g++-4.8', 'ninja-build']
- os: linux
compiler: gcc
env: COMPILER=g++-4.9
@@ -188,33 +203,15 @@ matrix:
sources: ['ubuntu-toolchain-r-test']
packages: ['g++-8', 'ninja-build']
- os: linux
compiler: gcc
env: COMPILER=g++-9
addons:
apt:
sources: ['ubuntu-toolchain-r-test']
packages: ['g++-9', 'ninja-build']
- os: linux
compiler: gcc
env:
- COMPILER=g++-9
- IMPLICIT_CONVERSIONS=OFF
- COMPILER=g++-8
- CXXFLAGS=-std=c++17
addons:
apt:
sources: ['ubuntu-toolchain-r-test']
packages: ['g++-9', 'ninja-build']
- os: linux
compiler: gcc
env:
- COMPILER=g++-9
- CXX_STANDARD=17
addons:
apt:
sources: ['ubuntu-toolchain-r-test']
packages: ['g++-9', 'ninja-build']
packages: ['g++-8', 'ninja-build']
# Linux / Clang
@@ -282,23 +279,15 @@ matrix:
sources: ['ubuntu-toolchain-r-test', 'llvm-toolchain-trusty-6.0']
packages: ['g++-6', 'clang-6.0', 'ninja-build']
- os: linux
compiler: clang
env: COMPILER=clang++-7
addons:
apt:
sources: ['ubuntu-toolchain-r-test', 'llvm-toolchain-trusty-7']
packages: ['g++-6', 'clang-7', 'ninja-build']
- os: linux
compiler: clang
env:
- COMPILER=clang++-7
- CXX_STANDARD=17
- COMPILER=clang++-6.0
- CXXFLAGS=-std=c++1z
addons:
apt:
sources: ['ubuntu-toolchain-r-test', 'llvm-toolchain-trusty-7']
packages: ['g++-7', 'clang-7', 'ninja-build']
sources: ['ubuntu-toolchain-r-test', 'llvm-toolchain-trusty-6.0']
packages: ['g++-6', 'clang-6.0', 'ninja-build']
################
# build script #
@@ -318,16 +307,15 @@ script:
- if [[ "${COMPILER}" != "" ]]; then export CXX=${COMPILER}; fi
# by default, use the single-header version
- if [[ "${MULTIPLE_HEADERS}" == "" ]]; then export MULTIPLE_HEADERS=OFF; fi
# by default, use implicit conversions
- if [[ "${IMPLICIT_CONVERSIONS}" == "" ]]; then export IMPLICIT_CONVERSIONS=ON; fi
# append CXX_STANDARD to CMAKE_OPTIONS if required
- CMAKE_OPTIONS+=${CXX_STANDARD:+ -DCMAKE_CXX_STANDARD=$CXX_STANDARD -DCMAKE_CXX_STANDARD_REQUIRED=ON}
# show OS/compiler version
- uname -a
- $CXX --version
# compile and execute unit tests
- mkdir -p build && cd build
- cmake .. ${CMAKE_OPTIONS} -DJSON_MultipleHeaders=${MULTIPLE_HEADERS} -DJSON_ImplicitConversions=${IMPLICIT_CONVERSIONS} -DJSON_BuildTests=On -GNinja && cmake --build . --config Release
- ctest -C Release --timeout 2700 -V -j
- cmake .. ${CMAKE_OPTIONS} -DJSON_MultipleHeaders=${MULTIPLE_HEADERS} -GNinja && cmake --build . --config Release
- ctest -C Release -V -j
- cd ..
# check if homebrew works (only checks develop branch)


@@ -1,19 +1,10 @@
cmake_minimum_required(VERSION 3.1)
cmake_minimum_required(VERSION 3.8)
##
## PROJECT
## name and version
##
project(nlohmann_json VERSION 3.9.1 LANGUAGES CXX)
##
## MAIN_PROJECT CHECK
## determine if nlohmann_json is built as a subproject (using add_subdirectory) or if it is the main project
##
set(MAIN_PROJECT OFF)
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
set(MAIN_PROJECT ON)
endif()
project(nlohmann_json VERSION 3.2.0 LANGUAGES CXX)
##
## INCLUDE
@@ -24,32 +15,22 @@ include(ExternalProject)
##
## OPTIONS
##
if (POLICY CMP0077)
# Allow CMake 3.13+ to override options when using FetchContent / add_subdirectory.
cmake_policy(SET CMP0077 NEW)
endif ()
option(JSON_BuildTests "Build the unit tests when BUILD_TESTING is enabled." ${MAIN_PROJECT})
option(JSON_Install "Install CMake targets during install step." ${MAIN_PROJECT})
option(JSON_BuildTests "Build the unit tests when BUILD_TESTING is enabled." ON)
option(JSON_MultipleHeaders "Use non-amalgamated version of the library." OFF)
option(JSON_ImplicitConversions "Enable implicit conversions." ON)
##
## CONFIGURATION
##
include(GNUInstallDirs)
set(NLOHMANN_JSON_TARGET_NAME ${PROJECT_NAME})
set(NLOHMANN_JSON_CONFIG_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" CACHE INTERNAL "")
set(NLOHMANN_JSON_INCLUDE_INSTALL_DIR "${CMAKE_INSTALL_INCLUDEDIR}")
set(NLOHMANN_JSON_CONFIG_INSTALL_DIR "lib/cmake/${PROJECT_NAME}"
CACHE INTERNAL "")
set(NLOHMANN_JSON_INCLUDE_INSTALL_DIR "include")
set(NLOHMANN_JSON_TARGETS_EXPORT_NAME "${PROJECT_NAME}Targets")
set(NLOHMANN_JSON_CMAKE_CONFIG_TEMPLATE "cmake/config.cmake.in")
set(NLOHMANN_JSON_CMAKE_CONFIG_DIR "${CMAKE_CURRENT_BINARY_DIR}")
set(NLOHMANN_JSON_CMAKE_VERSION_CONFIG_FILE "${NLOHMANN_JSON_CMAKE_CONFIG_DIR}/${PROJECT_NAME}ConfigVersion.cmake")
set(NLOHMANN_JSON_CMAKE_PROJECT_CONFIG_FILE "${NLOHMANN_JSON_CMAKE_CONFIG_DIR}/${PROJECT_NAME}Config.cmake")
set(NLOHMANN_JSON_CMAKE_PROJECT_TARGETS_FILE "${NLOHMANN_JSON_CMAKE_CONFIG_DIR}/${PROJECT_NAME}Targets.cmake")
set(NLOHMANN_JSON_PKGCONFIG_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
if (JSON_MultipleHeaders)
set(NLOHMANN_JSON_INCLUDE_BUILD_DIR "${PROJECT_SOURCE_DIR}/include/")
@@ -59,27 +40,13 @@ else()
message(STATUS "Using the single-header code from ${NLOHMANN_JSON_INCLUDE_BUILD_DIR}")
endif()
if (NOT JSON_ImplicitConversions)
message(STATUS "Implicit conversions are disabled")
endif()
##
## TARGET
## create target and add include path
##
add_library(${NLOHMANN_JSON_TARGET_NAME} INTERFACE)
add_library(${PROJECT_NAME}::${NLOHMANN_JSON_TARGET_NAME} ALIAS ${NLOHMANN_JSON_TARGET_NAME})
if (${CMAKE_VERSION} VERSION_LESS "3.8.0")
target_compile_features(${NLOHMANN_JSON_TARGET_NAME} INTERFACE cxx_range_for)
else()
target_compile_features(${NLOHMANN_JSON_TARGET_NAME} INTERFACE cxx_std_11)
endif()
target_compile_definitions(
${NLOHMANN_JSON_TARGET_NAME}
INTERFACE
JSON_USE_IMPLICIT_CONVERSIONS=$<BOOL:${JSON_ImplicitConversions}>
)
target_compile_features(${NLOHMANN_JSON_TARGET_NAME} INTERFACE cxx_std_11)
target_include_directories(
${NLOHMANN_JSON_TARGET_NAME}
@@ -93,25 +60,20 @@ if (MSVC)
set(NLOHMANN_ADD_NATVIS TRUE)
set(NLOHMANN_NATVIS_FILE "nlohmann_json.natvis")
target_sources(
${NLOHMANN_JSON_TARGET_NAME}
INTERFACE
${NLOHMANN_JSON_TARGET_NAME}
INTERFACE
$<INSTALL_INTERFACE:${NLOHMANN_NATVIS_FILE}>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/${NLOHMANN_NATVIS_FILE}>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/${NLOHMANN_NATVIS_FILE}>
)
endif()
# Install a pkg-config file, so other tools can find this.
CONFIGURE_FILE(
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/pkg-config.pc.in"
"${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc"
)
##
## TESTS
## create and configure the unit test target
##
if (JSON_BuildTests)
include(CTest)
include(CTest) #adds option BUILD_TESTING (default ON)
if(BUILD_TESTING AND JSON_BuildTests)
enable_testing()
add_subdirectory(test)
endif()
@@ -121,52 +83,41 @@ endif()
## install header files, generate and install cmake config files for find_package()
##
include(CMakePackageConfigHelpers)
# use a custom package version config file instead of
# write_basic_package_version_file to ensure that it's architecture-independent
# https://github.com/nlohmann/json/issues/1697
configure_file(
"cmake/nlohmann_jsonConfigVersion.cmake.in"
${NLOHMANN_JSON_CMAKE_VERSION_CONFIG_FILE}
@ONLY
write_basic_package_version_file(
${NLOHMANN_JSON_CMAKE_VERSION_CONFIG_FILE} COMPATIBILITY SameMajorVersion
)
configure_file(
configure_package_config_file(
${NLOHMANN_JSON_CMAKE_CONFIG_TEMPLATE}
${NLOHMANN_JSON_CMAKE_PROJECT_CONFIG_FILE}
@ONLY
INSTALL_DESTINATION ${NLOHMANN_JSON_CONFIG_INSTALL_DIR}
)
if(JSON_Install)
install(
DIRECTORY ${NLOHMANN_JSON_INCLUDE_BUILD_DIR}
DESTINATION ${NLOHMANN_JSON_INCLUDE_INSTALL_DIR}
)
install(
FILES ${NLOHMANN_JSON_CMAKE_PROJECT_CONFIG_FILE} ${NLOHMANN_JSON_CMAKE_VERSION_CONFIG_FILE}
DESTINATION ${NLOHMANN_JSON_CONFIG_INSTALL_DIR}
)
if (NLOHMANN_ADD_NATVIS)
install(
DIRECTORY ${NLOHMANN_JSON_INCLUDE_BUILD_DIR}
DESTINATION ${NLOHMANN_JSON_INCLUDE_INSTALL_DIR}
)
install(
FILES ${NLOHMANN_JSON_CMAKE_PROJECT_CONFIG_FILE} ${NLOHMANN_JSON_CMAKE_VERSION_CONFIG_FILE}
DESTINATION ${NLOHMANN_JSON_CONFIG_INSTALL_DIR}
)
if (NLOHMANN_ADD_NATVIS)
install(
FILES ${NLOHMANN_NATVIS_FILE}
DESTINATION .
)
endif()
export(
TARGETS ${NLOHMANN_JSON_TARGET_NAME}
NAMESPACE ${PROJECT_NAME}::
FILE ${NLOHMANN_JSON_CMAKE_PROJECT_TARGETS_FILE}
)
install(
TARGETS ${NLOHMANN_JSON_TARGET_NAME}
EXPORT ${NLOHMANN_JSON_TARGETS_EXPORT_NAME}
INCLUDES DESTINATION ${NLOHMANN_JSON_INCLUDE_INSTALL_DIR}
)
install(
EXPORT ${NLOHMANN_JSON_TARGETS_EXPORT_NAME}
NAMESPACE ${PROJECT_NAME}::
DESTINATION ${NLOHMANN_JSON_CONFIG_INSTALL_DIR}
)
install(
FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc"
DESTINATION ${NLOHMANN_JSON_PKGCONFIG_INSTALL_DIR}
)
FILES ${NLOHMANN_NATVIS_FILE}
DESTINATION .
)
endif()
export(
TARGETS ${NLOHMANN_JSON_TARGET_NAME}
NAMESPACE ${PROJECT_NAME}::
FILE ${NLOHMANN_JSON_CMAKE_PROJECT_TARGETS_FILE}
)
install(
TARGETS ${NLOHMANN_JSON_TARGET_NAME}
EXPORT ${NLOHMANN_JSON_TARGETS_EXPORT_NAME}
INCLUDES DESTINATION ${NLOHMANN_JSON_INCLUDE_INSTALL_DIR}
)
install(
EXPORT ${NLOHMANN_JSON_TARGETS_EXPORT_NAME}
NAMESPACE ${PROJECT_NAME}::
DESTINATION ${NLOHMANN_JSON_CONFIG_INSTALL_DIR}
)
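
The `JSON_ImplicitConversions` option added above maps to the `JSON_USE_IMPLICIT_CONVERSIONS` preprocessor definition on the interface target. A minimal sketch of what it changes on the consumer side:

    #include <nlohmann/json.hpp>
    #include <string>

    int main()
    {
        nlohmann::json j = "hello";
        // with JSON_USE_IMPLICIT_CONVERSIONS=1 (the default):
        std::string s1 = j;                     // implicit conversion compiles
        // with implicit conversions disabled, the explicit form is required:
        std::string s2 = j.get<std::string>();
    }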

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2013-2021 Niels Lohmann
Copyright (c) 2013-2018 Niels Lohmann
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

Makefile (652 lines changed)

@@ -1,66 +1,97 @@
.PHONY: pretty clean ChangeLog.md release
.PHONY: pretty clean ChangeLog.md
##########################################################################
# configuration
##########################################################################
SRCS = include/nlohmann/json.hpp \
include/nlohmann/json_fwd.hpp \
include/nlohmann/adl_serializer.hpp \
include/nlohmann/detail/conversions/from_json.hpp \
include/nlohmann/detail/conversions/to_chars.hpp \
include/nlohmann/detail/conversions/to_json.hpp \
include/nlohmann/detail/exceptions.hpp \
include/nlohmann/detail/input/binary_reader.hpp \
include/nlohmann/detail/input/input_adapters.hpp \
include/nlohmann/detail/input/json_sax.hpp \
include/nlohmann/detail/input/lexer.hpp \
include/nlohmann/detail/input/parser.hpp \
include/nlohmann/detail/iterators/internal_iterator.hpp \
include/nlohmann/detail/iterators/iter_impl.hpp \
include/nlohmann/detail/iterators/iteration_proxy.hpp \
include/nlohmann/detail/iterators/json_reverse_iterator.hpp \
include/nlohmann/detail/iterators/primitive_iterator.hpp \
include/nlohmann/detail/json_pointer.hpp \
include/nlohmann/detail/json_ref.hpp \
include/nlohmann/detail/macro_scope.hpp \
include/nlohmann/detail/macro_unscope.hpp \
include/nlohmann/detail/meta/cpp_future.hpp \
include/nlohmann/detail/meta/detected.hpp \
include/nlohmann/detail/meta/type_traits.hpp \
include/nlohmann/detail/meta/void_t.hpp \
include/nlohmann/detail/output/binary_writer.hpp \
include/nlohmann/detail/output/output_adapters.hpp \
include/nlohmann/detail/output/serializer.hpp \
include/nlohmann/detail/value_t.hpp
# directory to recent compiler binaries
COMPILER_DIR=/usr/local/opt/llvm/bin
UNAME = $(shell uname)
CXX=clang++
# find GNU sed to use `-i` parameter
SED:=$(shell command -v gsed || which sed)
##########################################################################
# source files
##########################################################################
# the list of sources in the include folder
SRCS=$(shell find include -type f | sort)
# the single header (amalgamated from the source files)
AMALGAMATED_FILE=single_include/nlohmann/json.hpp
##########################################################################
# documentation of the Makefile's targets
##########################################################################
# main target
all:
@echo "amalgamate - amalgamate file single_include/nlohmann/json.hpp from the include/nlohmann sources"
@echo "ChangeLog.md - generate ChangeLog file"
@echo "check - compile and execute test suite"
@echo "check-amalgamation - check whether sources have been amalgamated"
@echo "check-fast - compile and execute test suite (skip long-running tests)"
@echo "clean - remove built files"
@echo "coverage - create coverage information with lcov"
@echo "cppcheck - analyze code with cppcheck"
@echo "cpplint - analyze code with cpplint"
@echo "clang_tidy - analyze code with Clang-Tidy"
@echo "clang_analyze - analyze code with Clang-Analyzer"
@echo "doctest - compile example files and check their output"
@echo "fuzz_testing - prepare fuzz testing of the JSON parser"
@echo "fuzz_testing_bson - prepare fuzz testing of the BSON parser"
@echo "fuzz_testing_cbor - prepare fuzz testing of the CBOR parser"
@echo "fuzz_testing_msgpack - prepare fuzz testing of the MessagePack parser"
@echo "fuzz_testing_ubjson - prepare fuzz testing of the UBJSON parser"
@echo "json_unit - create single-file test executable"
@echo "pedantic_clang - run Clang with maximal warning flags"
@echo "pedantic_gcc - run GCC with maximal warning flags"
@echo "pretty - beautify code with Artistic Style"
@echo "run_benchmarks - build and run benchmarks"
##########################################################################
# unit tests
##########################################################################
# build unit tests
json_unit:
@$(MAKE) json_unit -C test
# run unit tests
check:
$(MAKE) check -C test
check-fast:
$(MAKE) check -C test TEST_PATTERN=""
# clean up
clean:
rm -fr json_unit json_benchmarks fuzz fuzz-testing *.dSYM test/*.dSYM
rm -fr benchmarks/files/numbers/*.json
rm -fr build_coverage build_benchmarks
$(MAKE) clean -Cdoc
$(MAKE) clean -Ctest
##########################################################################
# coverage
##########################################################################
coverage:
rm -fr cmake-build-coverage
mkdir cmake-build-coverage
cd cmake-build-coverage ; cmake .. -GNinja -DCMAKE_BUILD_TYPE=Debug -DJSON_Coverage=ON -DJSON_MultipleHeaders=ON
cd cmake-build-coverage ; ninja
cd cmake-build-coverage ; ctest -j10
cd cmake-build-coverage ; ninja lcov_html
open cmake-build-coverage/test/html/index.html
mkdir build_coverage
cd build_coverage ; CXX=g++-7 cmake .. -GNinja -DJSON_Coverage=ON -DJSON_MultipleHeaders=ON
cd build_coverage ; ninja
cd build_coverage ; ctest -E '.*_default' -j10
cd build_coverage ; ninja lcov_html
open build_coverage/test/html/index.html
##########################################################################
# documentation tests
@@ -76,309 +107,101 @@ doctest:
##########################################################################
# calling Clang with all warnings, except:
# -Wno-c++2a-compat: u8 literals will behave differently in C++20...
# -Wno-deprecated-declarations: the library deprecated some functions
# -Wno-documentation-unknown-command: code uses user-defined commands like @complexity
# -Wno-exit-time-destructors: warning in json code triggered by NLOHMANN_JSON_SERIALIZE_ENUM
# -Wno-float-equal: not all comparisons in the tests can be replaced by Approx
# -Wno-missing-prototypes: for NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE
# -Wno-padded: padding is nothing to warn about
# -Wno-range-loop-analysis: items tests "for(const auto i...)"
# -Wno-extra-semi-stmt: spurious warnings for semicolons after JSON_ASSERT()
# -Wno-switch-enum -Wno-covered-switch-default: pedantic/contradicting warnings about switches
# -Wno-exit-time-destructors: warning in Catch code
# -Wno-keyword-macro: unit-tests use "#define private public"
# -Wno-deprecated-declarations: the library deprecated some functions
# -Wno-weak-vtables: exception class is defined inline, but has virtual method
# -Wno-range-loop-analysis: items tests "for(const auto i...)"
# -Wno-float-equal: not all comparisons in the tests can be replaced by Approx
# -Wno-switch-enum -Wno-covered-switch-default: pedantic/contradicting warnings about switches
# -Wno-padded: padding is nothing to warn about
pedantic_clang:
rm -fr cmake-build-pedantic
CXXFLAGS=" \
$(MAKE) json_unit CXXFLAGS="\
-std=c++11 -Wno-c++98-compat -Wno-c++98-compat-pedantic \
-Werror \
-Weverything \
-Wno-c++2a-compat \
-Wno-deprecated-declarations \
-Wno-documentation-unknown-command \
-Wno-exit-time-destructors \
-Wno-float-equal \
-Wno-missing-prototypes \
-Wno-padded \
-Wno-keyword-macro \
-Wno-deprecated-declarations \
-Wno-weak-vtables \
-Wno-range-loop-analysis \
-Wno-extra-semi-stmt \
-Wno-float-equal \
-Wno-switch-enum -Wno-covered-switch-default \
-Wno-weak-vtables" cmake -S . -B cmake-build-pedantic -GNinja -DCMAKE_BUILD_TYPE=Debug -DJSON_MultipleHeaders=ON -DJSON_BuildTests=On
cmake --build cmake-build-pedantic
-Wno-padded"
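
One of the warnings suppressed above, -Wno-keyword-macro, exists because the unit tests re-define a C++ keyword to reach private members; a minimal sketch of that pattern:

    #define private public        // make private members inspectable in tests;
    #include <nlohmann/json.hpp>  // this re-definition triggers -Wkeyword-macro
    #undef private

    int main() {}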
# calling GCC with most warnings
pedantic_gcc:
rm -fr cmake-build-pedantic
CXXFLAGS=" \
$(MAKE) json_unit CXXFLAGS="\
-std=c++11 \
-pedantic \
-Wno-deprecated-declarations \
-Werror \
--all-warnings \
--extra-warnings \
-W \
-Wno-abi-tag \
-Waddress \
-Waddress-of-packed-member \
-Wno-aggregate-return \
-Waggressive-loop-optimizations \
-Waligned-new=all \
-Wall \
-Walloc-zero \
-Walloca \
-Wanalyzer-double-fclose \
-Wanalyzer-double-free \
-Wanalyzer-exposure-through-output-file \
-Wanalyzer-file-leak \
-Wanalyzer-free-of-non-heap \
-Wanalyzer-malloc-leak \
-Wanalyzer-null-argument \
-Wanalyzer-null-dereference \
-Wanalyzer-possible-null-argument \
-Wanalyzer-possible-null-dereference \
-Wanalyzer-stale-setjmp-buffer \
-Wanalyzer-tainted-array-index \
-Wanalyzer-too-complex \
-Wanalyzer-unsafe-call-within-signal-handler \
-Wanalyzer-use-after-free \
-Wanalyzer-use-of-pointer-in-stale-stack-frame \
-Warith-conversion \
-Warray-bounds \
-Warray-bounds=2 \
-Wattribute-alias=2 \
-Wattribute-warning \
-Wattributes \
-Wbool-compare \
-Wbool-operation \
-Wbuiltin-declaration-mismatch \
-Wbuiltin-macro-redefined \
-Wc++0x-compat \
-Wc++11-compat \
-Wc++14-compat \
-Wc++17-compat \
-Wc++1z-compat \
-Wc++20-compat \
-Wc++2a-compat \
-Wcannot-profile \
-Wcast-align \
-Wcast-align=strict \
-Wcast-function-type \
-Wcast-qual \
-Wcatch-value=3 \
-Wchar-subscripts \
-Wclass-conversion \
-Wclass-memaccess \
-Wclobbered \
-Wcomma-subscript \
-Wcomment \
-Wcomments \
-Wconditionally-supported \
-Wconversion \
-Wconversion-null \
-Wcoverage-mismatch \
-Wcpp \
-Wctor-dtor-privacy \
-Wdangling-else \
-Wdate-time \
-Wdelete-incomplete \
-Wdelete-non-virtual-dtor \
-Wdeprecated \
-Wdeprecated-copy \
-Wdeprecated-copy-dtor \
-Wdeprecated-declarations \
-Wdisabled-optimization \
-Wdiv-by-zero \
-Wdouble-promotion \
-Wduplicated-branches \
-Wduplicated-cond \
-Weffc++ \
-Wempty-body \
-Wendif-labels \
-Wenum-compare \
-Wexpansion-to-defined \
-Wextra \
-Wextra-semi \
-Wfloat-conversion \
-Wfloat-equal \
-Wformat -Wformat-contains-nul \
-Wformat -Wformat-extra-args \
-Wformat -Wformat-nonliteral \
-Wformat -Wformat-security \
-Wformat -Wformat-y2k \
-Wformat -Wformat-zero-length \
-Wformat-diag \
-Wformat-overflow=2 \
-Wformat-signedness \
-Wformat-truncation=2 \
-Wformat=2 \
-Wframe-address \
-Wfree-nonheap-object \
-Whsa \
-Wif-not-aligned \
-Wignored-attributes \
-Wignored-qualifiers \
-Wimplicit-fallthrough=5 \
-Winaccessible-base \
-Winherited-variadic-ctor \
-Winit-list-lifetime \
-Winit-self \
-Winline \
-Wint-in-bool-context \
-Wint-to-pointer-cast \
-Winvalid-memory-model \
-Winvalid-offsetof \
-Winvalid-pch \
-Wliteral-suffix \
-Wlogical-not-parentheses \
-Wlogical-op \
-Wno-long-long \
-Wlto-type-mismatch \
-Wmain \
-Wmaybe-uninitialized \
-Wmemset-elt-size \
-Wmemset-transposed-args \
-Wmisleading-indentation \
-Wmismatched-tags \
-Wmissing-attributes \
-Wmissing-braces \
-Wno-missing-declarations \
-Wmissing-field-initializers \
-Wmissing-include-dirs \
-Wmissing-profile \
-Wmultichar \
-Wmultiple-inheritance \
-Wmultistatement-macros \
-Wno-namespaces \
-Wnarrowing \
-Wno-noexcept \
-Wnoexcept-type \
-Wnon-template-friend \
-Wnon-virtual-dtor \
-Wnonnull \
-Wnonnull-compare \
-Wnonportable-cfstrings \
-Wnormalized=nfkc \
-Wnull-dereference \
-Wodr \
-Wold-style-cast \
-Wopenmp-simd \
-Woverflow \
-Woverlength-strings \
-Woverloaded-virtual \
-Wpacked \
-Wpacked-bitfield-compat \
-Wpacked-not-aligned \
-Wno-padded \
-Wparentheses \
-Wpedantic \
-Wpessimizing-move \
-Wplacement-new=2 \
-Wpmf-conversions \
-Wpointer-arith \
-Wpointer-compare \
-Wpragmas \
-Wprio-ctor-dtor \
-Wpsabi \
-Wredundant-decls \
-Wredundant-move \
-Wredundant-tags \
-Wregister \
-Wreorder \
-Wrestrict \
-Wreturn-local-addr \
-Wreturn-type \
-Wscalar-storage-order \
-Wsequence-point \
-Wshadow=compatible-local \
-Wshadow=global \
-Wshadow=local \
-Wshift-count-negative \
-Wshift-count-overflow \
-Wshift-negative-value \
-Wshift-overflow=2 \
-Wsign-compare \
-Wsign-conversion \
-Wsign-promo \
-Wsized-deallocation \
-Wsizeof-array-argument \
-Wsizeof-pointer-div \
-Wsizeof-pointer-memaccess \
-Wstack-protector \
-Wstrict-aliasing \
-Wstrict-aliasing=3 \
-Wstrict-null-sentinel \
-Wstrict-overflow \
-Wstrict-overflow=5 \
-Wstring-compare \
-Wstringop-overflow \
-Wstringop-overflow=4 \
-Wstringop-truncation \
-Wsubobject-linkage \
-Wsuggest-attribute=cold \
-Wsuggest-attribute=const \
-Wsuggest-attribute=format \
-Wsuggest-attribute=malloc \
-Wsuggest-attribute=noreturn \
-Wsuggest-attribute=pure \
-Wsuggest-final-methods \
-Wsuggest-final-types \
-Wsuggest-override \
-Wswitch \
-Wswitch-bool \
-Wswitch-default \
-Wno-switch-enum \
-Wswitch-outside-range \
-Wswitch-unreachable \
-Wsync-nand \
-Wsynth \
-Wno-system-headers \
-Wtautological-compare \
-Wno-templates \
-Wterminate \
-Wtrampolines \
-Wtrigraphs \
-Wtype-limits \
-Wundef \
-Wuninitialized \
-Wunknown-pragmas \
-Wunreachable-code \
-Wunsafe-loop-optimizations \
-Wunused \
-Wunused-but-set-parameter \
-Wunused-but-set-variable \
-Wunused-const-variable=2 \
-Wunused-function \
-Wunused-label \
-Wno-unused-local-typedefs \
-Wunused-macros \
-Wunused-parameter \
-Wunused-result \
-Wunused-value \
-Wunused-variable \
-Wuseless-cast \
-Wvarargs \
-Wvariadic-macros \
-Wvector-operation-performance \
-Wvirtual-inheritance \
-Wvirtual-move-assign \
-Wvla \
-Wvolatile \
-Wvolatile-register-var \
-Wwrite-strings \
-Wzero-as-null-pointer-constant \
-Wzero-length-bounds \
" cmake -S . -B cmake-build-pedantic -GNinja -DCMAKE_BUILD_TYPE=Debug -DJSON_MultipleHeaders=ON -DJSON_BuildTests=On
cmake --build cmake-build-pedantic
-Wall -Wpedantic -Wextra \
-Walloca \
-Warray-bounds=2 \
-Wcast-qual -Wcast-align \
-Wchar-subscripts \
-Wconditionally-supported \
-Wconversion \
-Wdate-time \
-Wdeprecated \
-Wdisabled-optimization \
-Wdouble-promotion \
-Wduplicated-branches \
-Wduplicated-cond \
-Wformat-overflow=2 \
-Wformat-signedness \
-Wformat-truncation=2 \
-Wformat=2 \
-Wno-ignored-qualifiers \
-Wimplicit-fallthrough=5 \
-Wlogical-op \
-Wmissing-declarations \
-Wmissing-format-attribute \
-Wmissing-include-dirs \
-Wnoexcept \
-Wnonnull \
-Wnull-dereference \
-Wold-style-cast \
-Woverloaded-virtual \
-Wparentheses \
-Wplacement-new=2 \
-Wredundant-decls \
-Wreorder \
-Wrestrict \
-Wshadow=global \
-Wshift-overflow=2 \
-Wsign-conversion \
-Wsign-promo \
-Wsized-deallocation \
-Wstrict-overflow=5 \
-Wsuggest-attribute=const \
-Wsuggest-attribute=format \
-Wsuggest-attribute=noreturn \
-Wsuggest-attribute=pure \
-Wsuggest-final-methods \
-Wsuggest-final-types \
-Wsuggest-override \
-Wtrigraphs \
-Wundef \
-Wuninitialized -Wunknown-pragmas \
-Wunused \
-Wunused-const-variable=2 \
-Wunused-macros \
-Wunused-parameter \
-Wuseless-cast \
-Wvariadic-macros"
##########################################################################
# benchmarks
##########################################################################
run_benchmarks:
rm -fr cmake-build-benchmarks
mkdir cmake-build-benchmarks
cd cmake-build-benchmarks ; cmake ../benchmarks -GNinja -DCMAKE_BUILD_TYPE=Release -DJSON_BuildTests=On
cd cmake-build-benchmarks ; ninja
cd cmake-build-benchmarks ; ./json_benchmarks
mkdir build_benchmarks
cd build_benchmarks ; cmake ../benchmarks
cd build_benchmarks ; make
cd build_benchmarks ; ./json_benchmarks
##########################################################################
# fuzzing
@@ -393,14 +216,6 @@ fuzz_testing:
find test/data/json_tests -size -5k -name *json | xargs -I{} cp "{}" fuzz-testing/testcases
@echo "Execute: afl-fuzz -i fuzz-testing/testcases -o fuzz-testing/out fuzz-testing/fuzzer"
fuzz_testing_bson:
rm -fr fuzz-testing
mkdir -p fuzz-testing fuzz-testing/testcases fuzz-testing/out
$(MAKE) parse_bson_fuzzer -C test CXX=afl-clang++
mv test/parse_bson_fuzzer fuzz-testing/fuzzer
find test/data -size -5k -name *.bson | xargs -I{} cp "{}" fuzz-testing/testcases
@echo "Execute: afl-fuzz -i fuzz-testing/testcases -o fuzz-testing/out fuzz-testing/fuzzer"
fuzz_testing_cbor:
rm -fr fuzz-testing
mkdir -p fuzz-testing fuzz-testing/testcases fuzz-testing/out
@@ -439,206 +254,81 @@ fuzzing-stop:
-killall fuzzer
-killall afl-fuzz
##########################################################################
# Static analysis
# static analyzer
##########################################################################
# call cppcheck <http://cppcheck.sourceforge.net>
# Note: this target is called by Travis
# call cppcheck on the main header file
cppcheck:
cppcheck --enable=warning --inline-suppr --inconclusive --force --std=c++11 $(AMALGAMATED_FILE) --error-exitcode=1
cppcheck --enable=warning --inconclusive --force --std=c++11 $(AMALGAMATED_FILE) --error-exitcode=1
# call Clang Static Analyzer <https://clang-analyzer.llvm.org>
# compile and check with Clang Static Analyzer
clang_analyze:
rm -fr cmake-build-clang-analyze
mkdir cmake-build-clang-analyze
cd cmake-build-clang-analyze ; CCC_CXX=$(COMPILER_DIR)/clang++ CXX=$(COMPILER_DIR)/clang++ $(COMPILER_DIR)/scan-build cmake .. -GNinja -DJSON_BuildTests=On
cd cmake-build-clang-analyze ; \
$(COMPILER_DIR)/scan-build \
-enable-checker alpha.core.BoolAssignment,alpha.core.CallAndMessageUnInitRefArg,alpha.core.CastSize,alpha.core.CastToStruct,alpha.core.Conversion,alpha.core.DynamicTypeChecker,alpha.core.FixedAddr,alpha.core.PointerArithm,alpha.core.PointerSub,alpha.core.SizeofPtr,alpha.core.StackAddressAsyncEscape,alpha.core.TestAfterDivZero,alpha.deadcode.UnreachableCode,core.builtin.BuiltinFunctions,core.builtin.NoReturnFunctions,core.CallAndMessage,core.DivideZero,core.DynamicTypePropagation,core.NonnilStringConstants,core.NonNullParamChecker,core.NullDereference,core.StackAddressEscape,core.UndefinedBinaryOperatorResult,core.uninitialized.ArraySubscript,core.uninitialized.Assign,core.uninitialized.Branch,core.uninitialized.CapturedBlockVariable,core.uninitialized.UndefReturn,core.VLASize,cplusplus.InnerPointer,cplusplus.Move,cplusplus.NewDelete,cplusplus.NewDeleteLeaks,cplusplus.SelfAssignment,deadcode.DeadStores,nullability.NullableDereferenced,nullability.NullablePassedToNonnull,nullability.NullableReturnedFromNonnull,nullability.NullPassedToNonnull,nullability.NullReturnedFromNonnull \
--use-c++=$(COMPILER_DIR)/clang++ -analyze-headers -o report ninja
open cmake-build-clang-analyze/report/*/index.html
# call cpplint <https://github.com/cpplint/cpplint>
# Note: some errors expected due to false positives
cpplint:
third_party/cpplint/cpplint.py \
--filter=-whitespace,-legal,-readability/alt_tokens,-runtime/references,-runtime/explicit \
--quiet --recursive $(SRCS)
# call Clang-Tidy <https://clang.llvm.org/extra/clang-tidy/>
clang_tidy:
$(COMPILER_DIR)/clang-tidy $(SRCS) -- -Iinclude -std=c++11
# call PVS-Studio Analyzer <https://www.viva64.com/en/pvs-studio/>
pvs_studio:
rm -fr cmake-build-pvs-studio
mkdir cmake-build-pvs-studio
cd cmake-build-pvs-studio ; cmake .. -DCMAKE_EXPORT_COMPILE_COMMANDS=On -DJSON_MultipleHeaders=ON
cd cmake-build-pvs-studio ; pvs-studio-analyzer analyze -j 10
cd cmake-build-pvs-studio ; plog-converter -a'GA:1,2;64:1;CS' -t fullhtml PVS-Studio.log -o pvs
open cmake-build-pvs-studio/pvs/index.html
# call Infer <https://fbinfer.com> static analyzer
infer:
rm -fr cmake-build-infer
mkdir cmake-build-infer
cd cmake-build-infer ; infer compile -- cmake .. -DJSON_MultipleHeaders=ON ; infer run -- make -j 4
# call OCLint <http://oclint.org> static analyzer
oclint:
oclint $(SRCS) -report-type html -enable-global-analysis -o oclint_report.html -max-priority-1=10000 -max-priority-2=10000 -max-priority-3=10000 -- -std=c++11 -Iinclude
open oclint_report.html
# execute the test suite with Clang sanitizers (address and undefined behavior)
clang_sanitize:
rm -fr cmake-build-clang-sanitize
mkdir cmake-build-clang-sanitize
cd cmake-build-clang-sanitize ; CXX=$(COMPILER_DIR)/clang++ cmake .. -DJSON_Sanitizer=On -DJSON_MultipleHeaders=ON -DJSON_BuildTests=On -GNinja
cd cmake-build-clang-sanitize ; ninja
cd cmake-build-clang-sanitize ; ctest -j10
rm -fr clang_analyze_build
mkdir clang_analyze_build
cd clang_analyze_build ; CCC_CXX=/Users/niels/Documents/projects/llvm-clang/local/bin/clang++ /Users/niels/Documents/projects/llvm-clang/local/bin/scan-build cmake ..
/Users/niels/Documents/projects/llvm-clang/local/bin/scan-build -enable-checker alpha.core.DynamicTypeChecker,alpha.core.PointerArithm,alpha.core.PointerSub,alpha.cplusplus.DeleteWithNonVirtualDtor,alpha.cplusplus.IteratorRange,alpha.cplusplus.MisusedMovedObject,alpha.security.ArrayBoundV2,alpha.core.Conversion --use-c++=/Users/niels/Documents/projects/llvm-clang/local/bin/clang++ --view -analyze-headers -o clang_analyze_build/report.html make -j10 -C clang_analyze_build
##########################################################################
# Code format and source amalgamation
# maintainer targets
##########################################################################
# call the Artistic Style pretty printer on all source files
# pretty printer
pretty:
astyle \
--style=allman \
--indent=spaces=4 \
--indent-modifiers \
--indent-switches \
--indent-preproc-block \
--indent-preproc-define \
--indent-col1-comments \
--pad-oper \
--pad-header \
--align-pointer=type \
--align-reference=type \
--add-brackets \
--convert-tabs \
--close-templates \
--lineend=linux \
--preserve-date \
--suffix=none \
--formatted \
$(SRCS) $(AMALGAMATED_FILE) test/src/*.cpp test/src/*.hpp benchmarks/src/benchmarks.cpp doc/examples/*.cpp
# call the Clang-Format on all source files
pretty_format:
for FILE in $(SRCS) $(AMALGAMATED_FILE) test/src/*.cpp test/src/*.hpp benchmarks/src/benchmarks.cpp doc/examples/*.cpp; do echo $$FILE; clang-format -i $$FILE; done
# create single header file
amalgamate: $(AMALGAMATED_FILE)
# call the amalgamation tool and pretty print
$(AMALGAMATED_FILE): $(SRCS)
third_party/amalgamate/amalgamate.py -c third_party/amalgamate/config.json -s . --verbose=yes
$(MAKE) pretty
# check if single_include/nlohmann/json.hpp has been amalgamated from the nlohmann sources
# Note: this target is called by Travis
check-amalgamation:
@mv $(AMALGAMATED_FILE) $(AMALGAMATED_FILE)~
@$(MAKE) amalgamate
@diff $(AMALGAMATED_FILE) $(AMALGAMATED_FILE)~ || (echo "===================================================================\n Amalgamation required! Please read the contribution guidelines\n in file .github/CONTRIBUTING.md.\n===================================================================" ; mv $(AMALGAMATED_FILE)~ $(AMALGAMATED_FILE) ; false)
@mv $(AMALGAMATED_FILE)~ $(AMALGAMATED_FILE)
# check if every header in nlohmann includes sufficient headers to be compiled individually
check-single-includes:
@for x in $(SRCS); do \
echo "Checking self-sufficiency of $$x..." ; \
echo "#include <$$x>\nint main() {}\n" | $(SED) 's|include/||' > single_include_test.cpp; \
$(CXX) $(CXXFLAGS) -Iinclude -std=c++11 single_include_test.cpp -o single_include_test; \
rm -f single_include_test.cpp single_include_test; \
done
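# For illustration: for a header like <nlohmann/json.hpp>, the generated
# single_include_test.cpp is simply (sketch):
#
#     #include <nlohmann/json.hpp>
#     int main() {}
#
# If this translation unit compiles on its own, the header is self-contained.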
##########################################################################
# CMake
##########################################################################
# grep "^option" CMakeLists.txt test/CMakeLists.txt | $(SED) 's/(/ /' | awk '{print $2}' | xargs
# check if all flags of our CMake files work
check_cmake_flags_do:
$(CMAKE_BINARY) --version
for flag in JSON_BuildTests JSON_Install JSON_MultipleHeaders JSON_Sanitizer JSON_Valgrind JSON_NoExceptions JSON_Coverage; do \
rm -fr cmake_build; \
mkdir cmake_build; \
echo "\n\n$(CMAKE_BINARY) .. -D$$flag=On\n" ; \
cd cmake_build ; \
$(CMAKE_BINARY) -Werror=dev .. -D$$flag=On -DCMAKE_CXX_COMPILE_FEATURES="cxx_std_11;cxx_range_for" -DCMAKE_CXX_FLAGS="-std=gnu++11" ; \
test -f Makefile || exit 1 ; \
cd .. ; \
done;
# call target `check_cmake_flags_do` twice: once for minimal required CMake version 3.1.0 and once for the installed version
check_cmake_flags:
wget https://github.com/Kitware/CMake/releases/download/v3.1.0/cmake-3.1.0-Darwin64.tar.gz
tar xfz cmake-3.1.0-Darwin64.tar.gz
CMAKE_BINARY=$(abspath cmake-3.1.0-Darwin64/CMake.app/Contents/bin/cmake) $(MAKE) check_cmake_flags_do
CMAKE_BINARY=$(shell which cmake) $(MAKE) check_cmake_flags_do
##########################################################################
# ChangeLog
##########################################################################
# Create a ChangeLog based on the git log using the GitHub Changelog Generator
# (<https://github.com/github-changelog-generator/github-changelog-generator>).
# variable to control the diffs between the last released version and the current repository state
NEXT_VERSION ?= "unreleased"
ChangeLog.md:
github_changelog_generator -o ChangeLog.md --user nlohmann --project json --simple-list --release-url https://github.com/nlohmann/json/releases/tag/%s --future-release $(NEXT_VERSION)
$(SED) -i 's|https://github.com/nlohmann/json/releases/tag/HEAD|https://github.com/nlohmann/json/tree/HEAD|' ChangeLog.md
$(SED) -i '2i All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/).' ChangeLog.md
##########################################################################
# Release files
##########################################################################
# Create the files for a release and add signatures and hashes. We use `-X` to make the resulting ZIP file
# reproducible, see <https://content.pivotal.io/blog/barriers-to-deterministic-reproducible-zip-files>.
release:
rm -fr release_files
mkdir release_files
zip -9 --recurse-paths -X include.zip $(SRCS) $(AMALGAMATED_FILE) meson.build LICENSE.MIT
gpg --armor --detach-sig include.zip
mv include.zip include.zip.asc release_files
gpg --armor --detach-sig $(AMALGAMATED_FILE)
cp $(AMALGAMATED_FILE) release_files
mv $(AMALGAMATED_FILE).asc release_files
cd release_files ; shasum -a 256 json.hpp > hashes.txt
cd release_files ; shasum -a 256 include.zip >> hashes.txt
##########################################################################
# Maintenance
##########################################################################
# clean up
clean:
rm -fr json_unit json_benchmarks fuzz fuzz-testing *.dSYM test/*.dSYM oclint_report.html
rm -fr benchmarks/files/numbers/*.json
rm -fr cmake-3.1.0-Darwin64.tar.gz cmake-3.1.0-Darwin64
rm -fr cmake-build-coverage cmake-build-benchmarks cmake-build-pedantic fuzz-testing cmake-build-clang-analyze cmake-build-pvs-studio cmake-build-infer cmake-build-clang-sanitize cmake_build
$(MAKE) clean -Cdoc
##########################################################################
# Thirdparty code
##########################################################################
update_hedley:
rm -f include/nlohmann/thirdparty/hedley/hedley.hpp include/nlohmann/thirdparty/hedley/hedley_undef.hpp
curl https://raw.githubusercontent.com/nemequ/hedley/master/hedley.h -o include/nlohmann/thirdparty/hedley/hedley.hpp
$(SED) -i 's/HEDLEY_/JSON_HEDLEY_/g' include/nlohmann/thirdparty/hedley/hedley.hpp
grep "[[:blank:]]*#[[:blank:]]*undef" include/nlohmann/thirdparty/hedley/hedley.hpp | grep -v "__" | sort | uniq | $(SED) 's/ //g' | $(SED) 's/undef/undef /g' > include/nlohmann/thirdparty/hedley/hedley_undef.hpp
$(MAKE) amalgamate
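# For illustration: after the rename above, every Hedley macro is exposed with
# a JSON_HEDLEY_ prefix. A sketch of a (hypothetical) use site:
#
#     #include <nlohmann/thirdparty/hedley/hedley.hpp>
#
#     JSON_HEDLEY_NON_NULL(1)
#     void parse_input(const char* text);
#
# The generated hedley_undef.hpp #undefs each of these macros again so they
# do not leak out of the amalgamated header.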

README.md (754 changed lines)

File diff suppressed because it is too large.

appveyor.yml

@@ -3,145 +3,57 @@ version: '{build}'
environment:
matrix:
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
configuration: Debug
platform: x86
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
GENERATOR: Visual Studio 14 2015
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
configuration: Debug
platform: x86
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
GENERATOR: Visual Studio 15 2017
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
configuration: Debug
platform: x86
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
GENERATOR: Visual Studio 16 2019
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
configuration: Debug
platform: x64
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
GENERATOR: Visual Studio 16 2019
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
configuration: Debug
COMPILER: mingw
platform: x86
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
FLAGS: ""
GENERATOR: Ninja
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
configuration: Release
COMPILER: mingw
platform: x86
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
GENERATOR: Ninja
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
configuration: Release
platform: x86
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
GENERATOR: Visual Studio 14 2015
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
configuration: Release
platform: x86
name: with_win_header
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
FLAGS: ""
GENERATOR: Visual Studio 14 2015
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
configuration: Release
platform: x86
CXX_FLAGS: "/permissive- /std:c++latest /utf-8"
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
FLAGS: ""
GENERATOR: Visual Studio 15 2017
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
configuration: Release
platform: x86
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: "-DJSON_ImplicitConversions=OFF"
GENERATOR: Visual Studio 16 2019
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
configuration: Release
platform: x64
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
GENERATOR: Visual Studio 16 2019
FLAGS: "/permissive- /std:c++latest /utf-8"
GENERATOR: Visual Studio 15 2017
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
configuration: Release
platform: x64
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
FLAGS: ""
GENERATOR: Visual Studio 14 2015
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
configuration: Release
platform: x64
CXX_FLAGS: "/permissive- /std:c++latest /Zc:__cplusplus /utf-8 /F4000000"
LINKER_FLAGS: "/STACK:4000000"
CMAKE_OPTIONS: ""
FLAGS: ""
GENERATOR: Visual Studio 15 2017
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
configuration: Release
platform: x64
CXX_FLAGS: ""
LINKER_FLAGS: ""
CMAKE_OPTIONS: ""
GENERATOR: Visual Studio 16 2019
FLAGS: "/permissive- /std:c++latest /utf-8"
GENERATOR: Visual Studio 15 2017
init:
- cmake --version
- msbuild /version
install:
- if "%COMPILER%"=="mingw" appveyor DownloadFile https://github.com/ninja-build/ninja/releases/download/v1.6.0/ninja-win.zip -FileName ninja.zip
- if "%COMPILER%"=="mingw" 7z x ninja.zip -oC:\projects\deps\ninja > nul
- if "%COMPILER%"=="mingw" set PATH=C:\projects\deps\ninja;%PATH%
- if "%COMPILER%"=="mingw" set PATH=C:\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev0\mingw64\bin;%PATH%
- if "%COMPILER%"=="mingw" g++ --version
- if "%platform%"=="x86" set GENERATOR_PLATFORM=Win32
- if "%COMPILER%"=="mingw" appveyor DownloadFile https://github.com/ninja-build/ninja/releases/download/v1.6.0/ninja-win.zip -FileName ninja.zip
- if "%COMPILER%"=="mingw" 7z x ninja.zip -oC:\projects\deps\ninja > nul
- if "%COMPILER%"=="mingw" set PATH=C:\projects\deps\ninja;%PATH%
- if "%COMPILER%"=="mingw" set PATH=C:\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev0\mingw64\bin;%PATH%
- if "%COMPILER%"=="mingw" g++ --version
before_build:
# for with_win_header build, inject the inclusion of Windows.h to the single-header library
- ps: if ($env:name -Eq "with_win_header") { $header_path = "single_include\nlohmann\json.hpp" }
- ps: if ($env:name -Eq "with_win_header") { "#include <Windows.h>`n" + (Get-Content $header_path | Out-String) | Set-Content $header_path }
- if "%GENERATOR%"=="Ninja" (cmake . -G "%GENERATOR%" -DCMAKE_BUILD_TYPE="%configuration%" -DCMAKE_CXX_FLAGS="%CXX_FLAGS%" -DCMAKE_EXE_LINKER_FLAGS="%LINKER_FLAGS%" -DCMAKE_IGNORE_PATH="C:/Program Files/Git/usr/bin" -DJSON_BuildTests=On "%CMAKE_OPTIONS%") else (cmake . -G "%GENERATOR%" -A "%GENERATOR_PLATFORM%" -DCMAKE_CXX_FLAGS="%CXX_FLAGS%" -DCMAKE_EXE_LINKER_FLAGS="%LINKER_FLAGS%" -DCMAKE_IGNORE_PATH="C:/Program Files/Git/usr/bin" -DJSON_BuildTests=On "%CMAKE_OPTIONS%")
- cmake . -G "%GENERATOR%" -DCMAKE_CXX_FLAGS="%FLAGS%" -DCMAKE_IGNORE_PATH="C:/Program Files/Git/usr/bin"
build_script:
- cmake --build . --config "%configuration%"
test_script:
- if "%configuration%"=="Release" ctest -C "%configuration%" -V -j
# On Debug builds, skip test-unicode_all
# as it is extremely slow to run and cause
# occasional timeouts on AppVeyor.
# More info: https://github.com/nlohmann/json/pull/1570
- if "%configuration%"=="Debug" ctest --exclude-regex "test-unicode" -C "%configuration%" -V -j

benchmarks/CMakeLists.txt

@@ -14,12 +14,14 @@ add_subdirectory(thirdparty/benchmark)
include_directories(thirdparty)
include_directories(${CMAKE_SOURCE_DIR}/../single_include)
# download test data
include(${CMAKE_SOURCE_DIR}/../cmake/download_test_data.cmake)
# benchmark binary
add_executable(json_benchmarks src/benchmarks.cpp)
target_compile_features(json_benchmarks PRIVATE cxx_std_11)
target_link_libraries(json_benchmarks benchmark ${CMAKE_THREAD_LIBS_INIT})
add_dependencies(json_benchmarks download_test_data)
target_include_directories(json_benchmarks PRIVATE ${CMAKE_BINARY_DIR}/include)
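# For orientation: download_test_data (included above) fetches the test data
# and exposes a generated header on ${CMAKE_BINARY_DIR}/include, which is why
# json_benchmarks depends on it and adds that include directory. A sketch of
# the generated header consumed as <test_data.hpp> by benchmarks.cpp (exact
# contents are generated at configure time; the path below is hypothetical):
#
#   #define TEST_DATA_DIRECTORY "/path/to/build/json_test_data"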

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large.

File diff suppressed because it is too large.

benchmarks/src/benchmarks.cpp

@@ -1,7 +1,6 @@
#include "benchmark/benchmark.h"
#include <nlohmann/json.hpp>
#include <fstream>
#include <test_data.hpp>
using json = nlohmann::json;
@@ -29,14 +28,14 @@ static void ParseFile(benchmark::State& state, const char* filename)
std::ifstream file(filename, std::ios::binary | std::ios::ate);
state.SetBytesProcessed(state.iterations() * file.tellg());
}
BENCHMARK_CAPTURE(ParseFile, jeopardy, TEST_DATA_DIRECTORY "/jeopardy/jeopardy.json");
BENCHMARK_CAPTURE(ParseFile, canada, TEST_DATA_DIRECTORY "/nativejson-benchmark/canada.json");
BENCHMARK_CAPTURE(ParseFile, citm_catalog, TEST_DATA_DIRECTORY "/nativejson-benchmark/citm_catalog.json");
BENCHMARK_CAPTURE(ParseFile, twitter, TEST_DATA_DIRECTORY "/nativejson-benchmark/twitter.json");
BENCHMARK_CAPTURE(ParseFile, floats, TEST_DATA_DIRECTORY "/regression/floats.json");
BENCHMARK_CAPTURE(ParseFile, signed_ints, TEST_DATA_DIRECTORY "/regression/signed_ints.json");
BENCHMARK_CAPTURE(ParseFile, unsigned_ints, TEST_DATA_DIRECTORY "/regression/unsigned_ints.json");
BENCHMARK_CAPTURE(ParseFile, small_signed_ints, TEST_DATA_DIRECTORY "/regression/small_signed_ints.json");
BENCHMARK_CAPTURE(ParseFile, jeopardy, "data/jeopardy/jeopardy.json");
BENCHMARK_CAPTURE(ParseFile, canada, "data/nativejson-benchmark/canada.json");
BENCHMARK_CAPTURE(ParseFile, citm_catalog, "data/nativejson-benchmark/citm_catalog.json");
BENCHMARK_CAPTURE(ParseFile, twitter, "data/nativejson-benchmark/twitter.json");
BENCHMARK_CAPTURE(ParseFile, floats, "data/numbers/floats.json");
BENCHMARK_CAPTURE(ParseFile, signed_ints, "data/numbers/signed_ints.json");
BENCHMARK_CAPTURE(ParseFile, unsigned_ints, "data/numbers/unsigned_ints.json");
//////////////////////////////////////////////////////////////////////////////
// parse JSON from string
@@ -62,14 +61,13 @@ static void ParseString(benchmark::State& state, const char* filename)
state.SetBytesProcessed(state.iterations() * str.size());
}
BENCHMARK_CAPTURE(ParseString, jeopardy, TEST_DATA_DIRECTORY "/jeopardy/jeopardy.json");
BENCHMARK_CAPTURE(ParseString, canada, TEST_DATA_DIRECTORY "/nativejson-benchmark/canada.json");
BENCHMARK_CAPTURE(ParseString, citm_catalog, TEST_DATA_DIRECTORY "/nativejson-benchmark/citm_catalog.json");
BENCHMARK_CAPTURE(ParseString, twitter, TEST_DATA_DIRECTORY "/nativejson-benchmark/twitter.json");
BENCHMARK_CAPTURE(ParseString, floats, TEST_DATA_DIRECTORY "/regression/floats.json");
BENCHMARK_CAPTURE(ParseString, signed_ints, TEST_DATA_DIRECTORY "/regression/signed_ints.json");
BENCHMARK_CAPTURE(ParseString, unsigned_ints, TEST_DATA_DIRECTORY "/regression/unsigned_ints.json");
BENCHMARK_CAPTURE(ParseString, small_signed_ints, TEST_DATA_DIRECTORY "/regression/small_signed_ints.json");
BENCHMARK_CAPTURE(ParseString, jeopardy, "data/jeopardy/jeopardy.json");
BENCHMARK_CAPTURE(ParseString, canada, "data/nativejson-benchmark/canada.json");
BENCHMARK_CAPTURE(ParseString, citm_catalog, "data/nativejson-benchmark/citm_catalog.json");
BENCHMARK_CAPTURE(ParseString, twitter, "data/nativejson-benchmark/twitter.json");
BENCHMARK_CAPTURE(ParseString, floats, "data/numbers/floats.json");
BENCHMARK_CAPTURE(ParseString, signed_ints, "data/numbers/signed_ints.json");
BENCHMARK_CAPTURE(ParseString, unsigned_ints, "data/numbers/unsigned_ints.json");
//////////////////////////////////////////////////////////////////////////////
@@ -89,22 +87,20 @@ static void Dump(benchmark::State& state, const char* filename, int indent)
state.SetBytesProcessed(state.iterations() * j.dump(indent).size());
}
BENCHMARK_CAPTURE(Dump, jeopardy / -, TEST_DATA_DIRECTORY "/jeopardy/jeopardy.json", -1);
BENCHMARK_CAPTURE(Dump, jeopardy / 4, TEST_DATA_DIRECTORY "/jeopardy/jeopardy.json", 4);
BENCHMARK_CAPTURE(Dump, canada / -, TEST_DATA_DIRECTORY "/nativejson-benchmark/canada.json", -1);
BENCHMARK_CAPTURE(Dump, canada / 4, TEST_DATA_DIRECTORY "/nativejson-benchmark/canada.json", 4);
BENCHMARK_CAPTURE(Dump, citm_catalog / -, TEST_DATA_DIRECTORY "/nativejson-benchmark/citm_catalog.json", -1);
BENCHMARK_CAPTURE(Dump, citm_catalog / 4, TEST_DATA_DIRECTORY "/nativejson-benchmark/citm_catalog.json", 4);
BENCHMARK_CAPTURE(Dump, twitter / -, TEST_DATA_DIRECTORY "/nativejson-benchmark/twitter.json", -1);
BENCHMARK_CAPTURE(Dump, twitter / 4, TEST_DATA_DIRECTORY "/nativejson-benchmark/twitter.json", 4);
BENCHMARK_CAPTURE(Dump, floats / -, TEST_DATA_DIRECTORY "/regression/floats.json", -1);
BENCHMARK_CAPTURE(Dump, floats / 4, TEST_DATA_DIRECTORY "/regression/floats.json", 4);
BENCHMARK_CAPTURE(Dump, signed_ints / -, TEST_DATA_DIRECTORY "/regression/signed_ints.json", -1);
BENCHMARK_CAPTURE(Dump, signed_ints / 4, TEST_DATA_DIRECTORY "/regression/signed_ints.json", 4);
BENCHMARK_CAPTURE(Dump, unsigned_ints / -, TEST_DATA_DIRECTORY "/regression/unsigned_ints.json", -1);
BENCHMARK_CAPTURE(Dump, unsigned_ints / 4, TEST_DATA_DIRECTORY "/regression/unsigned_ints.json", 4);
BENCHMARK_CAPTURE(Dump, small_signed_ints / -, TEST_DATA_DIRECTORY "/regression/small_signed_ints.json", -1);
BENCHMARK_CAPTURE(Dump, small_signed_ints / 4, TEST_DATA_DIRECTORY "/regression/small_signed_ints.json", 4);
BENCHMARK_CAPTURE(Dump, jeopardy / -, "data/jeopardy/jeopardy.json", -1);
BENCHMARK_CAPTURE(Dump, jeopardy / 4, "data/jeopardy/jeopardy.json", 4);
BENCHMARK_CAPTURE(Dump, canada / -, "data/nativejson-benchmark/canada.json", -1);
BENCHMARK_CAPTURE(Dump, canada / 4, "data/nativejson-benchmark/canada.json", 4);
BENCHMARK_CAPTURE(Dump, citm_catalog / -, "data/nativejson-benchmark/citm_catalog.json", -1);
BENCHMARK_CAPTURE(Dump, citm_catalog / 4, "data/nativejson-benchmark/citm_catalog.json", 4);
BENCHMARK_CAPTURE(Dump, twitter / -, "data/nativejson-benchmark/twitter.json", -1);
BENCHMARK_CAPTURE(Dump, twitter / 4, "data/nativejson-benchmark/twitter.json", 4);
BENCHMARK_CAPTURE(Dump, floats / -, "data/numbers/floats.json", -1);
BENCHMARK_CAPTURE(Dump, floats / 4, "data/numbers/floats.json", 4);
BENCHMARK_CAPTURE(Dump, signed_ints / -, "data/numbers/signed_ints.json", -1);
BENCHMARK_CAPTURE(Dump, signed_ints / 4, "data/numbers/signed_ints.json", 4);
BENCHMARK_CAPTURE(Dump, unsigned_ints / -, "data/numbers/unsigned_ints.json", -1);
BENCHMARK_CAPTURE(Dump, unsigned_ints / 4, "data/numbers/unsigned_ints.json", 4);
BENCHMARK_MAIN();

benchmarks/thirdparty/benchmark/AUTHORS vendored Executable file → Normal file (3 changed lines)

@@ -13,7 +13,6 @@ Arne Beer <arne@twobeer.de>
Carto
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dirac Research
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Fiselier <eric@efcs.ca>
@@ -32,7 +31,6 @@ Kishan Kumar <kumar.kishan@outlook.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
MongoDB Inc.
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Paul Redmond <paul.redmond@gmail.com>
@@ -40,7 +38,6 @@ Radoslav Yovchev <radoslav.tm@gmail.com>
Roman Lebedev <lebedev.ri@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Steinar H. Gunderson <sgunderson@bigfoot.com>
Stripe, Inc.
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>


@@ -1,42 +0,0 @@
licenses(["notice"])
config_setting(
name = "windows",
values = {
"cpu": "x64_windows",
},
visibility = [":__subpackages__"],
)
cc_library(
name = "benchmark",
srcs = glob(
[
"src/*.cc",
"src/*.h",
],
exclude = ["src/benchmark_main.cc"],
),
hdrs = ["include/benchmark/benchmark.h"],
linkopts = select({
":windows": ["-DEFAULTLIB:shlwapi.lib"],
"//conditions:default": ["-pthread"],
}),
strip_include_prefix = "include",
visibility = ["//visibility:public"],
)
cc_library(
name = "benchmark_main",
srcs = ["src/benchmark_main.cc"],
hdrs = ["include/benchmark/benchmark.h"],
strip_include_prefix = "include",
visibility = ["//visibility:public"],
deps = [":benchmark"],
)
cc_library(
name = "benchmark_internal_headers",
hdrs = glob(["src/*.h"]),
visibility = ["//test:__pkg__"],
)

benchmarks/thirdparty/benchmark/CMakeLists.txt vendored Executable file → Normal file (40 changed lines)

@@ -27,48 +27,10 @@ option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree buildi
# in cases where it is not possible to build or find a valid version of gtest.
option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)
set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
function(should_enable_assembly_tests)
if(CMAKE_BUILD_TYPE)
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
# FIXME: The --coverage flag needs to be removed when building assembly
# tests for this to work.
return()
endif()
endif()
if (MSVC)
return()
elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
return()
elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
# FIXME: Make these work on 32 bit builds
return()
elseif(BENCHMARK_BUILD_32_BITS)
# FIXME: Make these work on 32 bit builds
return()
endif()
find_program(LLVM_FILECHECK_EXE FileCheck)
if (LLVM_FILECHECK_EXE)
set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE)
message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}")
else()
message(STATUS "Failed to find LLVM FileCheck")
return()
endif()
set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE)
endfunction()
should_enable_assembly_tests()
# This option disables the building and running of the assembly verification tests
option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests"
${ENABLE_ASSEMBLY_TESTS_DEFAULT})
# Make sure we can import our CMake functions
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
# Read the git tags to determine the project version
include(GetGitVersion)
get_git_version(GIT_VERSION)
@@ -216,7 +178,7 @@ if (BENCHMARK_USE_LIBCXX)
# linker flags appear before all linker inputs and -lc++ must appear after.
list(APPEND BENCHMARK_CXX_LIBRARIES c++)
else()
message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler")
endif()
endif(BENCHMARK_USE_LIBCXX)

benchmarks/thirdparty/benchmark/CONTRIBUTING.md

@@ -1,58 +0,0 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**, it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
Once your CLA is submitted (or if you already submitted one for
another Google project), make a commit adding yourself to the
[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
of your first [pull request][].
[AUTHORS]: AUTHORS
[CONTRIBUTORS]: CONTRIBUTORS
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and set up a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages are able to be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[pull request]: https://help.github.com/articles/creating-a-pull-request

benchmarks/thirdparty/benchmark/CONTRIBUTORS vendored Executable file → Normal file (3 changed lines)

@@ -28,7 +28,6 @@ Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dominic Hamon <dma@stripysock.com> <dominic@google.com>
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Fiselier <eric@efcs.ca>
@@ -39,7 +38,6 @@ Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
John Millikin <jmillikin@stripe.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kai Wolf <kai.wolf@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
@@ -55,7 +53,6 @@ Pierre Phaneuf <pphaneuf@google.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Raul Marin <rmrodriguez@cartodb.com>
Ray Glover <ray.glover@uk.ibm.com>
Robert Guo <robert.guo@mongodb.com>
Roman Lebedev <lebedev.ri@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Tobias Ulvgård <tobias.ulvgard@dirac.se>

benchmarks/thirdparty/benchmark/LICENSE vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/README.md vendored Executable file → Normal file (39 changed lines)

@@ -14,8 +14,6 @@ IRC channel: https://freenode.net #googlebenchmark
[Additional Tooling Documentation](docs/tools.md)
[Assembly Testing Documentation](docs/AssemblyTests.md)
## Building
@@ -23,7 +21,7 @@ The basic steps for configuring and building the library look like this:
```bash
$ git clone https://github.com/google/benchmark.git
# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
$ git clone https://github.com/google/googletest.git benchmark/googletest
$ mkdir build && cd build
$ cmake -G <generator> [options] ../benchmark
@@ -31,13 +29,15 @@ $ cmake -G <generator> [options] ../benchmark
$ make
```
Note that Google Benchmark requires Google Test to build and run the tests. This
dependency can be provided two ways:
* Checkout the Google Test sources into `benchmark/googletest` as above.
* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
configuration, the library will automatically download and build any required
dependencies.
If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
to `CMAKE_ARGS`.
@@ -59,7 +59,6 @@ Now, let's clone the repository and build it
```
git clone https://github.com/google/benchmark.git
cd benchmark
git clone https://github.com/google/googletest.git
mkdir build
cd build
cmake .. -DCMAKE_BUILD_TYPE=RELEASE
@@ -72,7 +71,7 @@ We need to install the library globally now
sudo make install
```
Now you have google/benchmark installed on your machine
Note: Don't forget to link to pthread library while building
## Stable and Experimental Library Versions
@@ -87,11 +86,6 @@ to use, test, and provide feedback on the new features are encouraged to try
this branch. However, this branch provides no stability guarantees and reserves
the right to change and break the API at any time.
## Prerequisite knowledge
Before attempting to understand this framework one should ideally have some familiarity with the structure and format of the Google Test framework, upon which it is based. Documentation for Google Test, including a "Getting Started" (primer) guide, is available here:
https://github.com/google/googletest/blob/master/googletest/docs/Documentation.md
## Example usage
### Basic usage
@@ -118,10 +112,7 @@ BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();
```
Don't forget to inform your linker to add benchmark library e.g. through
`-lbenchmark` compilation flag. Alternatively, you may leave out the
`BENCHMARK_MAIN();` at the end of the source file and link against
`-lbenchmark_main` to get the same default behavior.
The benchmark library will report the timing for the code within the `for(...)` loop.
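For reference, a complete program in this style looks roughly as follows (a sketch assuming a library version that supports the range-for `State` API; `DoNotOptimize` keeps the copy from being optimized away):

```c++
#include <benchmark/benchmark.h>
#include <string>

// Measure copy construction of a short std::string.
static void BM_StringCopy(benchmark::State& state) {
  std::string x = "hello";
  for (auto _ : state) {
    std::string copy(x);
    benchmark::DoNotOptimize(copy);
  }
}
BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();
```

Compile and link with `-lbenchmark` (and `-pthread`, as noted below) to run it.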
@@ -830,7 +821,7 @@ BM_SetInsert/1024/10 33157 33648 21431 1.13369M
The JSON format outputs human readable json split into two top level attributes.
The `context` attribute contains information about the run in general, including
information about the CPU and the date.
The `benchmarks` attribute contains a list of every benchmark run. Example json
output looks like:
```json
{
@@ -902,11 +893,8 @@ If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake cach
If you are using clang, you may need to set `LLVMAR_EXECUTABLE`, `LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
## Linking against the library
When the library is built using GCC it is necessary to link with `-pthread`,
due to how GCC implements `std::thread`.
For GCC 4.x failing to link to pthreads will lead to runtime exceptions, not linker errors.
See [issue #67](https://github.com/google/benchmark/issues/67) for more details.
## Compiler Support
@@ -940,11 +928,8 @@ sudo cpupower frequency-set --governor powersave
# Known Issues
### Windows with CMake
* Users must manually link `shlwapi.lib`. Failure to do so may result
in unresolved symbols.
### Solaris
* Users must explicitly link with kstat library (-lkstat compilation flag).

benchmarks/thirdparty/benchmark/WORKSPACE

@@ -1,7 +0,0 @@
workspace(name = "com_github_google_benchmark")
http_archive(
name = "com_google_googletest",
urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
)

benchmarks/thirdparty/benchmark/appveyor.yml

@@ -1,56 +0,0 @@
version: '{build}'
image: Visual Studio 2017
configuration:
- Debug
- Release
environment:
matrix:
- compiler: msvc-15-seh
generator: "Visual Studio 15 2017"
- compiler: msvc-15-seh
generator: "Visual Studio 15 2017 Win64"
- compiler: msvc-14-seh
generator: "Visual Studio 14 2015"
- compiler: msvc-14-seh
generator: "Visual Studio 14 2015 Win64"
- compiler: msvc-12-seh
generator: "Visual Studio 12 2013"
- compiler: msvc-12-seh
generator: "Visual Studio 12 2013 Win64"
- compiler: gcc-5.3.0-posix
generator: "MinGW Makefiles"
cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin'
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
matrix:
fast_finish: true
install:
# git bash conflicts with MinGW makefiles
- if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%")
- if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%")
build_script:
- md _build -Force
- cd _build
- echo %configuration%
- cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON ..
- cmake --build . --config %configuration%
test_script:
- ctest -c %configuration% --timeout 300 --output-on-failure
artifacts:
- path: '_build/CMakeFiles/*.log'
name: logs
- path: '_build/Testing/**/*.xml'
name: test_results

benchmarks/thirdparty/benchmark/cmake/AddCXXCompilerFlag.cmake vendored Executable file → Normal file (10 changed lines)

@@ -62,13 +62,3 @@ function(add_required_cxx_compiler_flag FLAG)
message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler")
endif()
endfunction()
function(check_cxx_warning_flag FLAG)
mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
# Add -Werror to ensure the compiler generates an error if the warning flag
# doesn't exist.
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}")
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
endfunction()

benchmarks/thirdparty/benchmark/cmake/CXXFeatureCheck.cmake vendored Executable file → Normal file (36 changed lines)

@@ -27,27 +27,25 @@ function(cxx_feature_check FILE)
return()
endif()
if (NOT DEFINED COMPILE_${FEATURE})
message("-- Performing Test ${FEATURE}")
if(CMAKE_CROSSCOMPILING)
try_compile(COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
if(COMPILE_${FEATURE})
message(WARNING
"If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
set(RUN_${FEATURE} 0)
else()
set(RUN_${FEATURE} 1)
endif()
message("-- Performing Test ${FEATURE}")
if(CMAKE_CROSSCOMPILING)
try_compile(COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
if(COMPILE_${FEATURE})
message(WARNING
"If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
set(RUN_${FEATURE} 0)
else()
message("-- Performing Test ${FEATURE}")
try_run(RUN_${FEATURE} COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
set(RUN_${FEATURE} 1)
endif()
else()
message("-- Performing Test ${FEATURE}")
try_run(RUN_${FEATURE} COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
endif()
if(RUN_${FEATURE} EQUAL 0)

benchmarks/thirdparty/benchmark/cmake/Config.cmake.in vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/cmake/GetGitVersion.cmake vendored Executable file → Normal file (3 changed lines)

@@ -21,7 +21,6 @@ set(__get_git_version INCLUDED)
function(get_git_version var)
if(GIT_EXECUTABLE)
execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
RESULT_VARIABLE status
OUTPUT_VARIABLE GIT_VERSION
ERROR_QUIET)
@@ -34,11 +33,9 @@ function(get_git_version var)
# Work out if the repository is dirty
execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_QUIET
ERROR_QUIET)
execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD --
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE GIT_DIFF_INDEX
ERROR_QUIET)
string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)

benchmarks/thirdparty/benchmark/cmake/HandleGTest.cmake vendored Executable file → Normal file (78 changed lines)

@@ -1,5 +1,7 @@
macro(split_list listname)
string(REPLACE ";" " " ${listname} "${${listname}}")
endmacro()
macro(build_external_gtest)
include(ExternalProject)
@@ -21,22 +23,9 @@ macro(build_external_gtest)
if ("${GTEST_BUILD_TYPE}" STREQUAL "COVERAGE")
set(GTEST_BUILD_TYPE "DEBUG")
endif()
# FIXME: Since 10/Feb/2017 the googletest trunk has had a bug where
# -Werror=unused-function fires during the build on OS X. This is a temporary
# workaround to keep our travis bots from failing. It should be removed
# once gtest is fixed.
if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
list(APPEND GTEST_FLAGS "-Wno-unused-function")
endif()
split_list(GTEST_FLAGS)
set(EXCLUDE_FROM_ALL_OPT "")
set(EXCLUDE_FROM_ALL_VALUE "")
if (${CMAKE_VERSION} VERSION_GREATER "3.0.99")
set(EXCLUDE_FROM_ALL_OPT "EXCLUDE_FROM_ALL")
set(EXCLUDE_FROM_ALL_VALUE "ON")
endif()
ExternalProject_Add(googletest
${EXCLUDE_FROM_ALL_OPT} ${EXCLUDE_FROM_ALL_VALUE}
GIT_REPOSITORY https://github.com/google/googletest.git
GIT_TAG master
PREFIX "${CMAKE_BINARY_DIR}/googletest"
@@ -46,68 +35,45 @@ macro(build_external_gtest)
-DCMAKE_C_COMPILER:STRING=${CMAKE_C_COMPILER}
-DCMAKE_CXX_COMPILER:STRING=${CMAKE_CXX_COMPILER}
-DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
-DCMAKE_INSTALL_LIBDIR:PATH=<INSTALL_DIR>/lib
-DCMAKE_CXX_FLAGS:STRING=${GTEST_FLAGS}
-Dgtest_force_shared_crt:BOOL=ON
)
ExternalProject_Get_Property(googletest install_dir)
set(GTEST_INCLUDE_DIRS ${install_dir}/include)
file(MAKE_DIRECTORY ${GTEST_INCLUDE_DIRS})
set(LIB_SUFFIX "${CMAKE_STATIC_LIBRARY_SUFFIX}")
set(LIB_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}")
if("${GTEST_BUILD_TYPE}" STREQUAL "DEBUG")
set(LIB_SUFFIX "d${CMAKE_STATIC_LIBRARY_SUFFIX}")
endif()
# Use gmock_main instead of gtest_main because it initializes gtest as well.
# Note: The libraries are listed in reverse order of their dependencies.
foreach(LIB gtest gmock gmock_main)
add_library(${LIB} UNKNOWN IMPORTED)
set_target_properties(${LIB} PROPERTIES
IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}${LIB}${LIB_SUFFIX}
INTERFACE_INCLUDE_DIRECTORIES ${GTEST_INCLUDE_DIRS}
INTERFACE_LINK_LIBRARIES "${GTEST_BOTH_LIBRARIES}"
)
add_dependencies(${LIB} googletest)
list(APPEND GTEST_BOTH_LIBRARIES ${LIB})
endforeach()
endmacro(build_external_gtest)
if (BENCHMARK_ENABLE_GTEST_TESTS)
if (IS_DIRECTORY ${CMAKE_SOURCE_DIR}/googletest)
set(GTEST_ROOT "${CMAKE_SOURCE_DIR}/googletest")
set(INSTALL_GTEST OFF CACHE INTERNAL "")
set(INSTALL_GMOCK OFF CACHE INTERNAL "")
add_subdirectory(${CMAKE_SOURCE_DIR}/googletest)
set(GTEST_BOTH_LIBRARIES gtest gmock gmock_main)
elseif(BENCHMARK_DOWNLOAD_DEPENDENCIES)
build_external_gtest()
else()
find_package(GTest REQUIRED)
find_path(GMOCK_INCLUDE_DIRS gmock/gmock.h
HINTS ${GTEST_INCLUDE_DIRS})
if (NOT GMOCK_INCLUDE_DIRS)
message(FATAL_ERROR "Failed to find header gmock/gmock.h with hint ${GTEST_INCLUDE_DIRS}")
endif()
set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIRS} ${GMOCK_INCLUDE_DIRS})
# FIXME: We don't currently require the gmock library to build the tests,
# and it's likely we won't find it, so we don't try. As long as we've
# found the gmock/gmock.h header and gtest_main that should be good enough.
endif()
endif()

benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMAr.cmake vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMNm.cmake vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/cmake/Modules/FindLLVMRanLib.cmake vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/cmake/benchmark.pc.in vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/cmake/gnu_posix_regex.cpp vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/cmake/llvm-toolchain.cmake vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/cmake/posix_regex.cpp vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/cmake/split_list.cmake

@@ -1,3 +0,0 @@
macro(split_list listname)
string(REPLACE ";" " " ${listname} "${${listname}}")
endmacro()

benchmarks/thirdparty/benchmark/cmake/std_regex.cpp vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/cmake/steady_clock.cpp vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/cmake/thread_safety_attributes.cpp vendored Executable file → Normal file (no content changes)

benchmarks/thirdparty/benchmark/docs/AssemblyTests.md

@@ -1,147 +0,0 @@
# Assembly Tests
The Benchmark library provides a number of functions whose primary
purpose is to affect assembly generation, including `DoNotOptimize`
and `ClobberMemory`. In addition there are other functions,
such as `KeepRunning`, for which generating good assembly is paramount.
For these functions it's important to have tests that verify the
correctness and quality of the implementation. This requires testing
the code generated by the compiler.
This document describes how the Benchmark library tests compiler output,
as well as how to properly write new tests.
## Anatomy of a Test
Writing a test has two steps:
* Write the code you want to generate assembly for.
* Add `// CHECK` lines to match against the verified assembly.
Example:
```c++
// CHECK-LABEL: test_add:
extern "C" int test_add() {
extern int ExternInt;
return ExternInt + 1;
// CHECK: movl ExternInt(%rip), %eax
// CHECK: addl %eax
// CHECK: ret
}
```
#### LLVM Filecheck
[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
is used to test the generated assembly against the `// CHECK` lines
specified in the tests source file. Please see the documentation
linked above for information on how to write `CHECK` directives.
#### Tips and Tricks:
* Tests should match the minimal amount of output required to establish
correctness. `CHECK` directives don't have to match on the exact next line
after the previous match, so tests should omit checks for unimportant
bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive)
can be used to ensure a match occurs exactly after the previous match).
* The tests are compiled with `-O3 -g0`. So we're only testing the
optimized output.
* The assembly output is further cleaned up using `tools/strip_asm.py`.
This removes comments, assembler directives, and unused labels before
the test is run.
* The generated and stripped assembly file for a test is output under
`<build-directory>/test/<test-name>.s`
* Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes)
to specify lines that should only match in certain situations.
The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
are only expected to match Clang or GCC's output respectively. Normal
`CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
`CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
`CHECK` lines)
* Use `extern "C"` to disable name mangling for specific functions. This
makes them easier to name in the `CHECK` lines.
## Problems Writing Portable Tests
Writing tests which check the code generated by a compiler is
inherently non-portable.
versions may generate entirely different code. The Benchmark tests
must tolerate this.
LLVM Filecheck provides a number of mechanisms to help write
"more portable" tests; including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax),
allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive).
#### Capturing Variables
For example, say GCC stores a variable in a register but Clang stores
it in memory. To write a test that tolerates both cases we "capture"
the destination of the store, and then use the captured expression
to write the remainder of the test.
```c++
// CHECK-LABEL: test_div_no_op_into_shr:
extern "C" void test_div_no_op_into_shr(int value) {
int divisor = 2;
benchmark::DoNotOptimize(divisor); // hide the value from the optimizer
return value / divisor;
// CHECK: movl $2, [[DEST:.*]]
// CHECK: idivl [[DEST]]
// CHECK: ret
}
```
#### Using Regular Expressions to Match Differing Output
Often tests require testing assembly lines which may subtly differ
between compilers or compiler versions. A common example of this
is matching stack frame addresses. In this case regular expressions
can be used to match the differing bits of output. For example:
```c++
int ExternInt;
struct Point { int x, y, z; };
// CHECK-LABEL: test_store_point:
extern "C" void test_store_point() {
Point p{ExternInt, ExternInt, ExternInt};
benchmark::DoNotOptimize(p);
// CHECK: movl ExternInt(%rip), %eax
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: ret
}
```
## Current Requirements and Limitations
The tests require Filecheck to be installed along the `PATH` of the
build machine. Otherwise the tests will be disabled.
Additionally, as mentioned in the previous section, codegen tests are
inherently non-portable. Currently the tests are limited to:
* x86_64 targets.
* Compiled with GCC or Clang
Further work could be done, at least on a limited basis, to extend the
tests to other architectures and compilers (using `CHECK` prefixes).
Furthermore, the tests fail for builds which specify additional flags
that modify code generation, including `--coverage` or `-fsanitize=`.

benchmarks/thirdparty/benchmark/docs/tools.md

@@ -1,242 +0,0 @@
# Benchmark Tools
## compare_bench.py
The `compare_bench.py` utility can be used to compare the results of benchmarks.
The program is invoked like:
``` bash
$ compare_bench.py <old-benchmark> <new-benchmark> [benchmark options]...
```
Where `<old-benchmark>` and `<new-benchmark>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
`[benchmark options]` will be passed to the benchmarks invocations. They can be anything that binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
The sample output using the JSON test files under `Inputs/` gives:
``` bash
$ ./compare_bench.py ./gbench/Inputs/test1_run1.json ./gbench/Inputs/test1_run2.json
Comparing ./gbench/Inputs/test1_run1.json to ./gbench/Inputs/test1_run2.json
Benchmark Time CPU Time Old Time New CPU Old CPU New
-------------------------------------------------------------------------------------------------------------
BM_SameTimes +0.0000 +0.0000 10 10 10 10
BM_2xFaster -0.5000 -0.5000 50 25 50 25
BM_2xSlower +1.0000 +1.0000 50 100 50 100
BM_1PercentFaster -0.0100 -0.0100 100 99 100 99
BM_1PercentSlower +0.0100 +0.0100 100 101 100 101
BM_10PercentFaster -0.1000 -0.1000 100 90 100 90
BM_10PercentSlower +0.1000 +0.1000 100 110 100 110
BM_100xSlower +99.0000 +99.0000 100 10000 100 10000
BM_100xFaster -0.9900 -0.9900 10000 100 10000 100
BM_10PercentCPUToTime +0.1000 -0.1000 100 110 100 90
BM_ThirdFaster -0.3333 -0.3334 100 67 100 67
BM_BadTimeUnit -0.9000 +0.2000 0 0 0 1
```
As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
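For example, in the table above `BM_2xFaster` went from 50 to 25, giving (25 - 50) / |50| = -0.5000, and `BM_100xSlower` went from 100 to 10000, giving (10000 - 100) / |100| = +99.0000.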
When a benchmark executable is run, the raw output from the benchmark is printed in real time to stdout. The sample output using `benchmark/basic_test` for both arguments looks like:
```
./compare_bench.py test/basic_test test/basic_test --benchmark_filter=BM_empty.*
RUNNING: test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmpN7LF3a
Run on (8 X 4000 MHz CPU s)
2017-11-07 23:28:36
---------------------------------------------------------------------
Benchmark Time CPU Iterations
---------------------------------------------------------------------
BM_empty 4 ns 4 ns 170178757
BM_empty/threads:8 1 ns 7 ns 103868920
BM_empty_stop_start 0 ns 0 ns 1000000000
BM_empty_stop_start/threads:8 0 ns 0 ns 1403031720
RUNNING: /test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmplvrIp8
Run on (8 X 4000 MHz CPU s)
2017-11-07 23:28:38
---------------------------------------------------------------------
Benchmark Time CPU Iterations
---------------------------------------------------------------------
BM_empty 4 ns 4 ns 169534855
BM_empty/threads:8 1 ns 7 ns 104188776
BM_empty_stop_start 0 ns 0 ns 1000000000
BM_empty_stop_start/threads:8 0 ns 0 ns 1404159424
Comparing ../build/test/basic_test to ../build/test/basic_test
Benchmark Time CPU Time Old Time New CPU Old CPU New
---------------------------------------------------------------------------------------------------------------------
BM_empty -0.0048 -0.0049 4 4 4 4
BM_empty/threads:8 -0.0123 -0.0054 1 1 7 7
BM_empty_stop_start -0.0000 -0.0000 0 0 0 0
BM_empty_stop_start/threads:8 -0.0029 +0.0001 0 0 0 0
```
As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
Obviously this example doesn't give any useful output, but it's intended to show the output format when 'compare_bench.py' needs to run benchmarks.
## compare.py
The `compare.py` utility can be used to compare the results of benchmarks.
There are three modes of operation:
1. Just compare two benchmarks, which is what `compare_bench.py` does.
The program is invoked like:
``` bash
$ compare.py benchmarks <benchmark_baseline> <benchmark_contender> [benchmark options]...
```
Where `<benchmark_baseline>` and `<benchmark_contender>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
`[benchmark options]` will be passed to the benchmarks invocations. They can be anything that binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
Example output:
```
$ ./compare.py benchmarks ./a.out ./a.out
RUNNING: ./a.out --benchmark_out=/tmp/tmprBT5nW
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:16:44
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 36 ns 36 ns 19101577 211.669MB/s
BM_memcpy/64 76 ns 76 ns 9412571 800.199MB/s
BM_memcpy/512 84 ns 84 ns 8249070 5.64771GB/s
BM_memcpy/1024 116 ns 116 ns 6181763 8.19505GB/s
BM_memcpy/8192 643 ns 643 ns 1062855 11.8636GB/s
BM_copy/8 222 ns 222 ns 3137987 34.3772MB/s
BM_copy/64 1608 ns 1608 ns 432758 37.9501MB/s
BM_copy/512 12589 ns 12589 ns 54806 38.7867MB/s
BM_copy/1024 25169 ns 25169 ns 27713 38.8003MB/s
BM_copy/8192 201165 ns 201112 ns 3486 38.8466MB/s
RUNNING: ./a.out --benchmark_out=/tmp/tmpt1wwG_
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:16:53
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 36 ns 36 ns 19397903 211.255MB/s
BM_memcpy/64 73 ns 73 ns 9691174 839.635MB/s
BM_memcpy/512 85 ns 85 ns 8312329 5.60101GB/s
BM_memcpy/1024 118 ns 118 ns 6438774 8.11608GB/s
BM_memcpy/8192 656 ns 656 ns 1068644 11.6277GB/s
BM_copy/8 223 ns 223 ns 3146977 34.2338MB/s
BM_copy/64 1611 ns 1611 ns 435340 37.8751MB/s
BM_copy/512 12622 ns 12622 ns 54818 38.6844MB/s
BM_copy/1024 25257 ns 25239 ns 27779 38.6927MB/s
BM_copy/8192 205013 ns 205010 ns 3479 38.108MB/s
Comparing ./a.out to ./a.out
Benchmark Time CPU Time Old Time New CPU Old CPU New
------------------------------------------------------------------------------------------------------
BM_memcpy/8 +0.0020 +0.0020 36 36 36 36
BM_memcpy/64 -0.0468 -0.0470 76 73 76 73
BM_memcpy/512 +0.0081 +0.0083 84 85 84 85
BM_memcpy/1024 +0.0098 +0.0097 116 118 116 118
BM_memcpy/8192 +0.0200 +0.0203 643 656 643 656
BM_copy/8 +0.0046 +0.0042 222 223 222 223
BM_copy/64 +0.0020 +0.0020 1608 1611 1608 1611
BM_copy/512 +0.0027 +0.0026 12589 12622 12589 12622
BM_copy/1024 +0.0035 +0.0028 25169 25257 25169 25239
BM_copy/8192 +0.0191 +0.0194 201165 205013 201112 205010
```
For every benchmark from the first run, it looks for the benchmark with exactly the same name in the second run, and then compares the results. If the names differ, the benchmark is omitted from the diff.
As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
2. Compare two different filters of one benchmark
The program is invoked like:
``` bash
$ compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark options]...
```
Where `<benchmark>` either specifies a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
`[benchmark options]` will be passed to the benchmarks invocations. They can be anything that binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
Example output:
```
$ ./compare.py filters ./a.out BM_memcpy BM_copy
RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmpBWKk0k
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:37:28
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 36 ns 36 ns 17891491 211.215MB/s
BM_memcpy/64 74 ns 74 ns 9400999 825.646MB/s
BM_memcpy/512 87 ns 87 ns 8027453 5.46126GB/s
BM_memcpy/1024 111 ns 111 ns 6116853 8.5648GB/s
BM_memcpy/8192 657 ns 656 ns 1064679 11.6247GB/s
RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpAvWcOM
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:37:33
----------------------------------------------------
Benchmark Time CPU Iterations
----------------------------------------------------
BM_copy/8 227 ns 227 ns 3038700 33.6264MB/s
BM_copy/64 1640 ns 1640 ns 426893 37.2154MB/s
BM_copy/512 12804 ns 12801 ns 55417 38.1444MB/s
BM_copy/1024 25409 ns 25407 ns 27516 38.4365MB/s
BM_copy/8192 202986 ns 202990 ns 3454 38.4871MB/s
Comparing BM_memcpy to BM_copy (from ./a.out)
Benchmark Time CPU Time Old Time New CPU Old CPU New
--------------------------------------------------------------------------------------------------------------------
[BM_memcpy vs. BM_copy]/8 +5.2829 +5.2812 36 227 36 227
[BM_memcpy vs. BM_copy]/64 +21.1719 +21.1856 74 1640 74 1640
[BM_memcpy vs. BM_copy]/512 +145.6487 +145.6097 87 12804 87 12801
[BM_memcpy vs. BM_copy]/1024 +227.1860 +227.1776 111 25409 111 25407
[BM_memcpy vs. BM_copy]/8192 +308.1664 +308.2898 657 202986 656 202990
```
As you can see, the filter is applied both when running the benchmarks and before computing the diff; to make the diff line up, the matched parts of the names are replaced with a common string. This lets you compare two different benchmark families within one benchmark binary.
As you can see, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
3. Compare one filter from the first benchmark to another filter from the second benchmark:
The program is invoked like:
``` bash
$ compare.py benchmarksfiltered <benchmark_baseline> <filter_baseline> <benchmark_contender> <filter_contender> [benchmark options]...
```
Where `<benchmark_baseline>` and `<benchmark_contender>` each specify either a benchmark executable or a JSON output file. The type of each input file is detected automatically. If a benchmark executable is specified, it is run to obtain the results; otherwise, the results are simply loaded from the output file.
Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
`[benchmark options]` will be passed to the benchmark invocations. They can be anything the binary accepts, whether normal `--benchmark_*` parameters or custom parameters your binary takes.
Example output:
```
$ ./compare.py benchmarksfiltered ./a.out BM_memcpy ./a.out BM_copy
RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmp_FvbYg
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:38:27
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 37 ns 37 ns 18953482 204.118MB/s
BM_memcpy/64 74 ns 74 ns 9206578 828.245MB/s
BM_memcpy/512 91 ns 91 ns 8086195 5.25476GB/s
BM_memcpy/1024 120 ns 120 ns 5804513 7.95662GB/s
BM_memcpy/8192 664 ns 664 ns 1028363 11.4948GB/s
RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpDfL5iE
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:38:32
----------------------------------------------------
Benchmark Time CPU Iterations
----------------------------------------------------
BM_copy/8 230 ns 230 ns 2985909 33.1161MB/s
BM_copy/64 1654 ns 1653 ns 419408 36.9137MB/s
BM_copy/512 13122 ns 13120 ns 53403 37.2156MB/s
BM_copy/1024 26679 ns 26666 ns 26575 36.6218MB/s
BM_copy/8192 215068 ns 215053 ns 3221 36.3283MB/s
Comparing BM_memcpy (from ./a.out) to BM_copy (from ./a.out)
Benchmark Time CPU Time Old Time New CPU Old CPU New
--------------------------------------------------------------------------------------------------------------------
[BM_memcpy vs. BM_copy]/8 +5.1649 +5.1637 37 230 37 230
[BM_memcpy vs. BM_copy]/64 +21.4352 +21.4374 74 1654 74 1653
[BM_memcpy vs. BM_copy]/512 +143.6022 +143.5865 91 13122 91 13120
[BM_memcpy vs. BM_copy]/1024 +221.5903 +221.4790 120 26679 120 26666
[BM_memcpy vs. BM_copy]/8192 +322.9059 +323.0096 664 215068 664 215053
```
This is a mix of the previous two modes: two (potentially different) benchmark binaries are run, and a different filter is applied to each one.
As you can see, the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
benchmarks/thirdparty/benchmark/include/benchmark/benchmark.h vendored Executable file → Normal file
@@ -172,7 +172,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iosfwd>
@@ -292,7 +291,7 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
defined(__EMSCRIPTEN__)
defined(EMSCRIPTEN)
# define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
#endif
@@ -303,20 +302,15 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
// See: https://youtu.be/nXaxk27zwlk?t=2441
#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
template <class Tp>
inline BENCHMARK_ALWAYS_INLINE
void DoNotOptimize(Tp const& value) {
asm volatile("" : : "r,m"(value) : "memory");
}
template <class Tp>
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
// Clang doesn't like the 'X' constraint on `value` and certain GCC versions
// don't like the 'g' constraint. Attempt to placate them both.
#if defined(__clang__)
asm volatile("" : "+r,m"(value) : : "memory");
asm volatile("" : : "g"(value) : "memory");
#else
asm volatile("" : "+m,r"(value) : : "memory");
asm volatile("" : : "i,r,m"(value) : "memory");
#endif
}
// Force the compiler to flush pending writes to global memory. Acts as an
// effective read/write barrier
inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
@@ -385,7 +379,7 @@ enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
// BigOFunc is passed to a benchmark in order to specify the asymptotic
// computational complexity for the benchmark.
typedef double(BigOFunc)(int64_t);
typedef double(BigOFunc)(int);
// StatisticsFunc is passed to a benchmark in order to compute some descriptive
// statistics over all the measurements of some type
@@ -435,19 +429,16 @@ class State {
// Returns true if the benchmark should continue through another iteration.
// NOTE: A benchmark may not return from the test until KeepRunning() has
// returned false.
bool KeepRunning();
// Returns true iff the benchmark should run n more iterations.
// REQUIRES: 'n' > 0.
// NOTE: A benchmark must not return from the test until KeepRunningBatch()
// has returned false.
// NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
//
// Intended usage:
// while (state.KeepRunningBatch(1000)) {
// // process 1000 elements
// }
bool KeepRunningBatch(size_t n);
bool KeepRunning() {
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
StartKeepRunning();
}
bool const res = (--total_iterations_ != 0);
if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
FinishKeepRunning();
}
return res;
}
// REQUIRES: timer is running and 'SkipWithError(...)' has not been called
// by the current thread.
@@ -514,10 +505,10 @@ class State {
//
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
void SetBytesProcessed(int64_t bytes) { bytes_processed_ = bytes; }
void SetBytesProcessed(size_t bytes) { bytes_processed_ = bytes; }
BENCHMARK_ALWAYS_INLINE
int64_t bytes_processed() const { return bytes_processed_; }
size_t bytes_processed() const { return bytes_processed_; }
// If this routine is called with complexity_n > 0 and complexity report is
// requested for the
@@ -525,10 +516,10 @@ class State {
// and complexity_n will
// represent the length of N.
BENCHMARK_ALWAYS_INLINE
void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
void SetComplexityN(int complexity_n) { complexity_n_ = complexity_n; }
BENCHMARK_ALWAYS_INLINE
int64_t complexity_length_n() { return complexity_n_; }
int complexity_length_n() { return complexity_n_; }
// If this routine is called with items > 0, then an items/s
// label is printed on the benchmark report line for the currently
@@ -537,10 +528,10 @@ class State {
//
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
void SetItemsProcessed(int64_t items) { items_processed_ = items; }
void SetItemsProcessed(size_t items) { items_processed_ = items; }
BENCHMARK_ALWAYS_INLINE
int64_t items_processed() const { return items_processed_; }
size_t items_processed() const { return items_processed_; }
// If this routine is called, the specified label is printed at the
// end of the benchmark report line for the currently executing
@@ -548,7 +539,7 @@ class State {
// static void BM_Compress(benchmark::State& state) {
// ...
// double compress = input_size / output_size;
// state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression));
// state.SetLabel(StringPrintf("compress:%.1f%%", 100.0*compression));
// }
// Produces output that looks like:
// BM_Compress 50 50 14115038 compress:27.3%
@@ -562,52 +553,34 @@ class State {
// Range arguments for this run. CHECKs if the argument has been set.
BENCHMARK_ALWAYS_INLINE
int64_t range(std::size_t pos = 0) const {
int range(std::size_t pos = 0) const {
assert(range_.size() > pos);
return range_[pos];
}
BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
int64_t range_x() const { return range(0); }
int range_x() const { return range(0); }
BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
int64_t range_y() const { return range(1); }
int range_y() const { return range(1); }
BENCHMARK_ALWAYS_INLINE
size_t iterations() const {
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
return 0;
}
return max_iterations - total_iterations_ + batch_leftover_;
}
size_t iterations() const { return (max_iterations - total_iterations_) + 1; }
private: // items we expect on the first cache line (ie 64 bytes of the struct)
// When total_iterations_ is 0, KeepRunning() and friends will return false.
// May be larger than max_iterations.
size_t total_iterations_;
// When using KeepRunningBatch(), batch_leftover_ holds the number of
// iterations beyond max_iters that were run. Used to track
// completed_iterations_ accurately.
size_t batch_leftover_;
public:
const size_t max_iterations;
private:
private:
bool started_;
bool finished_;
size_t total_iterations_;
std::vector<int> range_;
size_t bytes_processed_;
size_t items_processed_;
int complexity_n_;
bool error_occurred_;
private: // items we don't need on the first cache line
std::vector<int64_t> range_;
int64_t bytes_processed_;
int64_t items_processed_;
int64_t complexity_n_;
public:
// Container for user-defined counters.
UserCounters counters;
@@ -615,69 +588,27 @@ private: // items we don't need on the first cache line
const int thread_index;
// Number of threads concurrently executing the benchmark.
const int threads;
const size_t max_iterations;
// TODO(EricWF) make me private
State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager);
private:
void StartKeepRunning();
// Implementation of KeepRunning() and KeepRunningBatch().
// is_batch must be true unless n is 1.
bool KeepRunningInternal(size_t n, bool is_batch);
void FinishKeepRunning();
internal::ThreadTimer* timer_;
internal::ThreadManager* manager_;
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State);
};
inline BENCHMARK_ALWAYS_INLINE
bool State::KeepRunning() {
return KeepRunningInternal(1, /*is_batch=*/ false);
}
inline BENCHMARK_ALWAYS_INLINE
bool State::KeepRunningBatch(size_t n) {
return KeepRunningInternal(n, /*is_batch=*/ true);
}
inline BENCHMARK_ALWAYS_INLINE
bool State::KeepRunningInternal(size_t n, bool is_batch) {
// total_iterations_ is set to 0 by the constructor, and always set to a
// nonzero value by StartKepRunning().
assert(n > 0);
// n must be 1 unless is_batch is true.
assert(is_batch || n == 1);
if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
total_iterations_ -= n;
return true;
}
if (!started_) {
StartKeepRunning();
if (!error_occurred_ && total_iterations_ >= n) {
total_iterations_-= n;
return true;
}
}
// For non-batch runs, total_iterations_ must be 0 by now.
if (is_batch && total_iterations_ != 0) {
batch_leftover_ = n - total_iterations_;
total_iterations_ = 0;
return true;
}
FinishKeepRunning();
return false;
}
struct State::StateIterator {
struct BENCHMARK_UNUSED Value {};
typedef std::forward_iterator_tag iterator_category;
typedef Value value_type;
typedef Value reference;
typedef Value pointer;
typedef std::ptrdiff_t difference_type;
private:
friend class State;
@@ -739,7 +670,7 @@ class Benchmark {
// Run this benchmark once with "x" as the extra argument passed
// to the function.
// REQUIRES: The function passed to the constructor must accept an arg1.
Benchmark* Arg(int64_t x);
Benchmark* Arg(int x);
// Run this benchmark with the given time unit for the generated output report
Benchmark* Unit(TimeUnit unit);
@@ -747,23 +678,23 @@ class Benchmark {
// Run this benchmark once for a number of values picked from the
// range [start..limit]. (start and limit are always picked.)
// REQUIRES: The function passed to the constructor must accept an arg1.
Benchmark* Range(int64_t start, int64_t limit);
Benchmark* Range(int start, int limit);
// Run this benchmark once for all values in the range [start..limit] with
// specific step
// REQUIRES: The function passed to the constructor must accept an arg1.
Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
Benchmark* DenseRange(int start, int limit, int step = 1);
// Run this benchmark once with "args" as the extra arguments passed
// to the function.
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
Benchmark* Args(const std::vector<int64_t>& args);
Benchmark* Args(const std::vector<int>& args);
// Equivalent to Args({x, y})
// NOTE: This is a legacy C++03 interface provided for compatibility only.
// New code should use 'Args'.
Benchmark* ArgPair(int64_t x, int64_t y) {
std::vector<int64_t> args;
Benchmark* ArgPair(int x, int y) {
std::vector<int> args;
args.push_back(x);
args.push_back(y);
return Args(args);
@@ -772,7 +703,7 @@ class Benchmark {
// Run this benchmark once for a number of values picked from the
// ranges [start..limit]. (starts and limits are always picked.)
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
Benchmark* Ranges(const std::vector<std::pair<int, int> >& ranges);
// Equivalent to ArgNames({name})
Benchmark* ArgName(const std::string& name);
@@ -784,8 +715,8 @@ class Benchmark {
// Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
// NOTE: This is a legacy C++03 interface provided for compatibility only.
// New code should use 'Ranges'.
Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
std::vector<std::pair<int64_t, int64_t> > ranges;
Benchmark* RangePair(int lo1, int hi1, int lo2, int hi2) {
std::vector<std::pair<int, int> > ranges;
ranges.push_back(std::make_pair(lo1, hi1));
ranges.push_back(std::make_pair(lo2, hi2));
return Ranges(ranges);
@@ -892,13 +823,15 @@ class Benchmark {
int ArgsCnt() const;
static void AddRange(std::vector<int>* dst, int lo, int hi, int mult);
private:
friend class BenchmarkFamilies;
std::string name_;
ReportMode report_mode_;
std::vector<std::string> arg_names_; // Args for all benchmark runs
std::vector<std::vector<int64_t> > args_; // Args for all benchmark runs
std::vector<std::vector<int> > args_; // Args for all benchmark runs
TimeUnit time_unit_;
int range_multiplier_;
double min_time_;
@@ -1253,7 +1186,7 @@ class BenchmarkReporter {
CPUInfo const& cpu_info;
// The number of chars in the longest benchmark name.
size_t name_field_width;
static const char *executable_name;
Context();
};
@@ -1306,7 +1239,7 @@ class BenchmarkReporter {
// Keep track of arguments to compute asymptotic complexity
BigO complexity;
BigOFunc* complexity_lambda;
int64_t complexity_n;
int complexity_n;
// what statistics to compute from the measurements
const std::vector<Statistics>* statistics;
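For context on the API being changed above, a typical benchmark using these declarations looks roughly like this (a minimal sketch against the Google Benchmark API; the argument sizes mirror the BM_memcpy runs shown earlier):
```cpp
#include <benchmark/benchmark.h>

#include <cstdint>
#include <cstring>
#include <vector>

static void BM_memcpy(benchmark::State& state) {
  const size_t size = static_cast<size_t>(state.range(0));
  std::vector<char> src(size, 'x');
  std::vector<char> dst(size);
  while (state.KeepRunning()) {
    std::memcpy(dst.data(), src.data(), size);
    benchmark::DoNotOptimize(dst.data());  // keep the copy from being elided
    benchmark::ClobberMemory();            // force pending writes to memory
  }
  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
                          static_cast<int64_t>(size));
}
BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1024)->Arg(8192);
BENCHMARK_MAIN();
```
`DoNotOptimize` and `ClobberMemory` are the two barriers from the inline-assembly hunk above, and `SetBytesProcessed` is what produces the MB/s and GB/s figures in the sample outputs earlier.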
@@ -1,320 +0,0 @@
#! /usr/bin/env python
# encoding: utf-8
import argparse
import errno
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile
try:
import winreg
except ImportError:
import _winreg as winreg
try:
import urllib.request as request
except ImportError:
import urllib as request
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse
class EmptyLogger(object):
'''
Provides an implementation that performs no logging
'''
def debug(self, *k, **kw):
pass
def info(self, *k, **kw):
pass
def warn(self, *k, **kw):
pass
def error(self, *k, **kw):
pass
def critical(self, *k, **kw):
pass
def setLevel(self, *k, **kw):
pass
urls = (
'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
'repository.txt',
'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
'repository.txt'
)
'''
A list of mingw-build repositories
'''
def repository(urls = urls, log = EmptyLogger()):
'''
Downloads and parse mingw-build repository files and parses them
'''
log.info('getting mingw-builds repository')
versions = {}
re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
re_sub = r'http://downloads.sourceforge.net/project/\1'
for url in urls:
log.debug(' - requesting: %s', url)
socket = request.urlopen(url)
repo = socket.read()
if not isinstance(repo, str):
repo = repo.decode();
socket.close()
for entry in repo.split('\n')[:-1]:
value = entry.split('|')
version = tuple([int(n) for n in value[0].strip().split('.')])
version = versions.setdefault(version, {})
arch = value[1].strip()
if arch == 'x32':
arch = 'i686'
elif arch == 'x64':
arch = 'x86_64'
arch = version.setdefault(arch, {})
threading = arch.setdefault(value[2].strip(), {})
exceptions = threading.setdefault(value[3].strip(), {})
revision = exceptions.setdefault(int(value[4].strip()[3:]),
re_sourceforge.sub(re_sub, value[5].strip()))
return versions
def find_in_path(file, path=None):
'''
Attempts to find an executable in the path
'''
if platform.system() == 'Windows':
file += '.exe'
if path is None:
path = os.environ.get('PATH', '')
if type(path) is type(''):
path = path.split(os.pathsep)
return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path)))
def find_7zip(log = EmptyLogger()):
'''
Attempts to find 7zip for unpacking the mingw-build archives
'''
log.info('finding 7zip')
path = find_in_path('7z')
if not path:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
path, _ = winreg.QueryValueEx(key, 'Path')
path = [os.path.join(path, '7z.exe')]
log.debug('found \'%s\'', path[0])
return path[0]
find_7zip()
def unpack(archive, location, log = EmptyLogger()):
'''
Unpacks a mingw-builds archive
'''
sevenzip = find_7zip(log)
log.info('unpacking %s', os.path.basename(archive))
cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
log.debug(' - %r', cmd)
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout = devnull)
def download(url, location, log = EmptyLogger()):
'''
Downloads and unpacks a mingw-builds archive
'''
log.info('downloading MinGW')
log.debug(' - url: %s', url)
log.debug(' - location: %s', location)
re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
stream = request.urlopen(url)
try:
content = stream.getheader('Content-Disposition') or ''
except AttributeError:
content = stream.headers.getheader('Content-Disposition') or ''
matches = re_content.match(content)
if matches:
filename = matches.group(2)
else:
parsed = parse.urlparse(stream.geturl())
filename = os.path.basename(parsed.path)
try:
os.makedirs(location)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(location):
pass
else:
raise
archive = os.path.join(location, filename)
with open(archive, 'wb') as out:
while True:
buf = stream.read(1024)
if not buf:
break
out.write(buf)
unpack(archive, location, log = log)
os.remove(archive)
possible = os.path.join(location, 'mingw64')
if not os.path.exists(possible):
possible = os.path.join(location, 'mingw32')
if not os.path.exists(possible):
raise ValueError('Failed to find unpacked MinGW: ' + possible)
return possible
def root(location = None, arch = None, version = None, threading = None,
exceptions = None, revision = None, log = EmptyLogger()):
'''
Returns the root folder of a specific version of the mingw-builds variant
of gcc. Will download the compiler if needed
'''
# Get the repository if we don't have all the information
if not (arch and version and threading and exceptions and revision):
versions = repository(log = log)
# Determine some defaults
version = version or max(versions.keys())
if not arch:
arch = platform.machine().lower()
if arch == 'x86':
arch = 'i686'
elif arch == 'amd64':
arch = 'x86_64'
if not threading:
keys = versions[version][arch].keys()
if 'posix' in keys:
threading = 'posix'
elif 'win32' in keys:
threading = 'win32'
else:
threading = keys[0]
if not exceptions:
keys = versions[version][arch][threading].keys()
if 'seh' in keys:
exceptions = 'seh'
elif 'sjlj' in keys:
exceptions = 'sjlj'
else:
exceptions = keys[0]
if revision == None:
revision = max(versions[version][arch][threading][exceptions].keys())
if not location:
location = os.path.join(tempfile.gettempdir(), 'mingw-builds')
# Get the download url
url = versions[version][arch][threading][exceptions][revision]
# Tell the user whatzzup
log.info('finding MinGW %s', '.'.join(str(v) for v in version))
log.debug(' - arch: %s', arch)
log.debug(' - threading: %s', threading)
log.debug(' - exceptions: %s', exceptions)
log.debug(' - revision: %s', revision)
log.debug(' - url: %s', url)
# Store each specific revision differently
slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
slug = slug.format(
version = '.'.join(str(v) for v in version),
arch = arch,
threading = threading,
exceptions = exceptions,
revision = revision
)
if arch == 'x86_64':
root_dir = os.path.join(location, slug, 'mingw64')
elif arch == 'i686':
root_dir = os.path.join(location, slug, 'mingw32')
else:
raise ValueError('Unknown MinGW arch: ' + arch)
# Download if needed
if not os.path.exists(root_dir):
downloaded = download(url, os.path.join(location, slug), log = log)
if downloaded != root_dir:
raise ValueError('The location of mingw did not match\n%s\n%s'
% (downloaded, root_dir))
return root_dir
def str2ver(string):
'''
Converts a version string into a tuple
'''
try:
version = tuple(int(v) for v in string.split('.'))
if len(version) is not 3:
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(
'please provide a three digit version string')
return version
def main():
'''
Invoked when the script is run directly by the python interpreter
'''
parser = argparse.ArgumentParser(
description = 'Downloads a specific version of MinGW',
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--location',
help = 'the location to download the compiler to',
default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
help = 'the target MinGW architecture string')
parser.add_argument('--version', type = str2ver,
help = 'the version of GCC to download')
parser.add_argument('--threading', choices = ['posix', 'win32'],
help = 'the threading type of the compiler')
parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
help = 'the method to throw exceptions')
parser.add_argument('--revision', type=int,
help = 'the revision of the MinGW release')
group = parser.add_mutually_exclusive_group()
group.add_argument('-v', '--verbose', action='store_true',
help='increase the script output verbosity')
group.add_argument('-q', '--quiet', action='store_true',
help='only print errors and warning')
args = parser.parse_args()
# Create the logger
logger = logging.getLogger('mingw')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
if args.quiet:
logger.setLevel(logging.WARN)
if args.verbose:
logger.setLevel(logging.DEBUG)
# Get MinGW
root_dir = root(location = args.location, arch = args.arch,
version = args.version, threading = args.threading,
exceptions = args.exceptions, revision = args.revision,
log = logger)
sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))
if __name__ == '__main__':
try:
main()
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
sys.exit(1)
except OSError as e:
sys.stderr.write('OS error: %s\n' % e)
sys.exit(1)
except KeyboardInterrupt as e:
sys.stderr.write('Killed\n')
sys.exit(1)
@@ -1,16 +0,0 @@
# How to release
* Make sure you're on master and synced to HEAD
* Ensure the project builds and tests run (sanity check only, obviously)
* `parallel -j0 exec ::: test/*_test` can help ensure everything at least
passes
* Prepare release notes
* `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of
commits between the last annotated tag and HEAD
* Pick the most interesting.
* Create a release through github's interface
* Note this will create a lightweight tag.
* Update this to an annotated tag:
* `git pull --tags`
* `git tag -a -f <tag> <tag>`
* `git push --force origin`
benchmarks/thirdparty/benchmark/src/CMakeLists.txt vendored Executable file → Normal file
@@ -11,7 +11,6 @@ file(GLOB
*.cc
${PROJECT_SOURCE_DIR}/include/benchmark/*.h
${CMAKE_CURRENT_SOURCE_DIR}/*.h)
list(FILTER SOURCE_FILES EXCLUDE REGEX "benchmark_main\\.cc")
add_library(benchmark ${SOURCE_FILES})
set_target_properties(benchmark PROPERTIES
@@ -35,23 +34,6 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
target_link_libraries(benchmark Shlwapi)
endif()
# We need extra libraries on Solaris
if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
target_link_libraries(benchmark kstat)
endif()
# Benchmark main library
add_library(benchmark_main "benchmark_main.cc")
set_target_properties(benchmark_main PROPERTIES
OUTPUT_NAME "benchmark_main"
VERSION ${GENERIC_LIB_VERSION}
SOVERSION ${GENERIC_LIB_SOVERSION}
)
target_include_directories(benchmark PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
)
target_link_libraries(benchmark_main benchmark)
set(include_install_dir "include")
set(lib_install_dir "lib/")
set(bin_install_dir "bin/")
@@ -78,7 +60,7 @@ configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ON
if (BENCHMARK_ENABLE_INSTALL)
# Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
install(
TARGETS benchmark benchmark_main
TARGETS benchmark
EXPORT ${targets_export_name}
ARCHIVE DESTINATION ${lib_install_dir}
LIBRARY DESTINATION ${lib_install_dir}
benchmarks/thirdparty/benchmark/src/arraysize.h vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/benchmark.cc vendored Executable file → Normal file
@@ -17,9 +17,7 @@
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif
@@ -29,10 +27,10 @@
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include "check.h"
@@ -46,8 +44,7 @@
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"
#include "timers.h"
DEFINE_bool(benchmark_list_tests, false,
"Print a list of benchmarks. This option overrides all other "
@@ -85,7 +82,7 @@ DEFINE_string(benchmark_out_format, "json",
"The format to use for file output. Valid values are "
"'console', 'json', or 'csv'.");
DEFINE_string(benchmark_out, "", "The file to write additional output to");
DEFINE_string(benchmark_out, "", "The file to write additonal output to");
DEFINE_string(benchmark_color, "auto",
"Whether to use colors in the output. Valid values: "
@@ -111,11 +108,118 @@ namespace internal {
void UseCharPointer(char const volatile*) {}
class ThreadManager {
public:
ThreadManager(int num_threads)
: alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
return benchmark_mutex_;
}
bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
return start_stop_barrier_.wait();
}
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
start_stop_barrier_.removeThread();
if (--alive_threads_ == 0) {
MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
}
}
void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(),
[this]() { return alive_threads_ == 0; });
}
public:
struct Result {
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
int64_t bytes_processed = 0;
int64_t items_processed = 0;
int complexity_n = 0;
std::string report_label_;
std::string error_message_;
bool has_error_ = false;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;
private:
mutable Mutex benchmark_mutex_;
std::atomic<int> alive_threads_;
Barrier start_stop_barrier_;
Mutex end_cond_mutex_;
Condition end_condition_;
};
// Timer management class
class ThreadTimer {
public:
ThreadTimer() = default;
// Called by each thread
void StartTimer() {
running_ = true;
start_real_time_ = ChronoClockNow();
start_cpu_time_ = ThreadCPUUsage();
}
// Called by each thread
void StopTimer() {
CHECK(running_);
running_ = false;
real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative
// time. Guard against that.
cpu_time_used_ += std::max<double>(ThreadCPUUsage() - start_cpu_time_, 0);
}
// Called by each thread
void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
bool running() const { return running_; }
// REQUIRES: timer is not running
double real_time_used() {
CHECK(!running_);
return real_time_used_;
}
// REQUIRES: timer is not running
double cpu_time_used() {
CHECK(!running_);
return cpu_time_used_;
}
// REQUIRES: timer is not running
double manual_time_used() {
CHECK(!running_);
return manual_time_used_;
}
private:
bool running_ = false; // Is the timer running
double start_real_time_ = 0; // If running_
double start_cpu_time_ = 0; // If running_
// Accumulated time so far (does not contain current slice if running_)
double real_time_used_ = 0;
double cpu_time_used_ = 0;
// Manually set iteration time. User sets this with SetIterationTime(seconds).
double manual_time_used_ = 0;
};
namespace {
BenchmarkReporter::Run CreateRunReport(
const benchmark::internal::Benchmark::Instance& b,
const internal::ThreadManager::Result& results,
const internal::ThreadManager::Result& results, size_t iters,
double seconds) {
// Create report about this benchmark run.
BenchmarkReporter::Run report;
@@ -124,8 +228,8 @@ BenchmarkReporter::Run CreateRunReport(
report.error_occurred = results.has_error_;
report.error_message = results.error_message_;
report.report_label = results.report_label_;
// This is the total iterations across all threads.
report.iterations = results.iterations;
// Report the total iterations across all threads.
report.iterations = static_cast<int64_t>(iters) * b.threads;
report.time_unit = b.time_unit;
if (!report.error_occurred) {
@@ -164,12 +268,11 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b,
internal::ThreadTimer timer;
State st(iters, b->arg, thread_id, b->threads, &timer, manager);
b->benchmark->Run(st);
CHECK(st.iterations() >= st.max_iterations)
CHECK(st.iterations() == st.max_iterations)
<< "Benchmark returned before State::KeepRunning() returned false!";
{
MutexLock l(manager->GetBenchmarkMutex());
internal::ThreadManager::Result& results = manager->results;
results.iterations += st.iterations();
results.cpu_time_used += timer.cpu_time_used();
results.real_time_used += timer.real_time_used();
results.manual_time_used += timer.manual_time_used();
@@ -237,17 +340,18 @@ std::vector<BenchmarkReporter::Run> RunBenchmark(
// Determine if this run should be reported; Either it has
// run for a sufficient amount of time or because an error was reported.
const bool should_report = repetition_num > 0
|| has_explicit_iteration_count // An exact iteration count was requested
|| has_explicit_iteration_count // An exact iteration count was requested
|| results.has_error_
|| iters >= kMaxIterations // No chance to try again, we hit the limit.
|| seconds >= min_time // the elapsed time is large enough
|| iters >= kMaxIterations
|| seconds >= min_time // the elapsed time is large enough
// CPU time is specified but the elapsed real time greatly exceeds the
// minimum time. Note that user provided timers are except from this
// sanity check.
|| ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);
if (should_report) {
BenchmarkReporter::Run report = CreateRunReport(b, results, seconds);
BenchmarkReporter::Run report =
CreateRunReport(b, results, iters, seconds);
if (!report.error_occurred && b.complexity != oNone)
complexity_reports->push_back(report);
reports.push_back(report);
@@ -290,44 +394,26 @@ std::vector<BenchmarkReporter::Run> RunBenchmark(
} // namespace
} // namespace internal
State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager)
: total_iterations_(0),
batch_leftover_(0),
max_iterations(max_iters),
started_(false),
: started_(false),
finished_(false),
error_occurred_(false),
total_iterations_(max_iters + 1),
range_(ranges),
bytes_processed_(0),
items_processed_(0),
complexity_n_(0),
error_occurred_(false),
counters(),
thread_index(thread_i),
threads(n_threads),
max_iterations(max_iters),
timer_(timer),
manager_(manager) {
CHECK(max_iterations != 0) << "At least one iteration must be run";
CHECK(total_iterations_ != 0) << "max iterations wrapped around";
CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
// Note: The use of offsetof below is technically undefined until C++17
// because State is not a standard layout type. However, all compilers
// currently provide well-defined behavior as an extension (which is
// demonstrated since constexpr evaluation must diagnose all undefined
// behavior). However, GCC and Clang also warn about this use of offsetof,
// which must be suppressed.
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
// Offset tests to ensure commonly accessed data is on the first cache line.
const int cache_line_size = 64;
static_assert(offsetof(State, error_occurred_) <=
(cache_line_size - sizeof(error_occurred_)), "");
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
}
void State::PauseTiming() {
@@ -351,7 +437,7 @@ void State::SkipWithError(const char* msg) {
manager_->results.has_error_ = true;
}
}
total_iterations_ = 0;
total_iterations_ = 1;
if (timer_->running()) timer_->StopTimer();
}
@@ -367,7 +453,6 @@ void State::SetLabel(const char* label) {
void State::StartKeepRunning() {
CHECK(!started_ && !finished_);
started_ = true;
total_iterations_ = error_occurred_ ? 0 : max_iterations;
manager_->StartStopBarrier();
if (!error_occurred_) ResumeTiming();
}
@@ -377,8 +462,8 @@ void State::FinishKeepRunning() {
if (!error_occurred_) {
PauseTiming();
}
// Total iterations has now wrapped around past 0. Fix this.
total_iterations_ = 0;
// Total iterations has now wrapped around zero. Fix this.
total_iterations_ = 1;
finished_ = true;
manager_->StartStopBarrier();
}
@@ -410,7 +495,7 @@ void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
BenchmarkReporter::Context context;
context.name_field_width = name_field_width;
// Keep track of running times of all instances of current benchmark
// Keep track of runing times of all instances of current benchmark
std::vector<BenchmarkReporter::Run> complexity_reports;
// We flush streams after invoking reporter methods that write to them. This
@@ -568,7 +653,6 @@ void PrintUsageAndExit() {
void ParseCommandLineFlags(int* argc, char** argv) {
using namespace benchmark;
BenchmarkReporter::Context::executable_name = argv[0];
for (int i = 1; i < *argc; ++i) {
if (ParseBoolFlag(argv[i], "benchmark_list_tests",
&FLAGS_benchmark_list_tests) ||
benchmarks/thirdparty/benchmark/src/benchmark_api_internal.h vendored Executable file → Normal file
@@ -17,7 +17,7 @@ struct Benchmark::Instance {
std::string name;
Benchmark* benchmark;
ReportMode report_mode;
std::vector<int64_t> arg;
std::vector<int> arg;
TimeUnit time_unit;
int range_multiplier;
bool use_real_time;
@@ -1,17 +0,0 @@
// Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
BENCHMARK_MAIN();
benchmarks/thirdparty/benchmark/src/benchmark_register.cc vendored Executable file → Normal file
@@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark_register.h"
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif
@@ -34,16 +34,13 @@
#include <sstream>
#include <thread>
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "check.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "internal_macros.h"
#include "statistics.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "timers.h"
@@ -77,7 +74,7 @@ class BenchmarkFamilies {
// Extract the list of benchmark instances that match the specified
// regular expression.
bool FindBenchmarks(std::string re,
bool FindBenchmarks(const std::string& re,
std::vector<Benchmark::Instance>* benchmarks,
std::ostream* Err);
@@ -107,18 +104,13 @@ void BenchmarkFamilies::ClearBenchmarks() {
}
bool BenchmarkFamilies::FindBenchmarks(
std::string spec, std::vector<Benchmark::Instance>* benchmarks,
const std::string& spec, std::vector<Benchmark::Instance>* benchmarks,
std::ostream* ErrStream) {
CHECK(ErrStream);
auto& Err = *ErrStream;
// Make regular expression out of command-line flag
std::string error_msg;
Regex re;
bool isNegativeFilter = false;
if(spec[0] == '-') {
spec.replace(0, 1, "");
isNegativeFilter = true;
}
if (!re.Init(spec, &error_msg)) {
Err << "Could not compile benchmark re: " << error_msg << std::endl;
return false;
@@ -178,20 +170,20 @@ bool BenchmarkFamilies::FindBenchmarks(
const auto& arg_name = family->arg_names_[arg_i];
if (!arg_name.empty()) {
instance.name +=
StrFormat("%s:", family->arg_names_[arg_i].c_str());
StringPrintF("%s:", family->arg_names_[arg_i].c_str());
}
}
instance.name += StrFormat("%d", arg);
instance.name += StringPrintF("%d", arg);
++arg_i;
}
if (!IsZero(family->min_time_))
instance.name += StrFormat("/min_time:%0.3f", family->min_time_);
instance.name += StringPrintF("/min_time:%0.3f", family->min_time_);
if (family->iterations_ != 0)
instance.name += StrFormat("/iterations:%d", family->iterations_);
instance.name += StringPrintF("/iterations:%d", family->iterations_);
if (family->repetitions_ != 0)
instance.name += StrFormat("/repeats:%d", family->repetitions_);
instance.name += StringPrintF("/repeats:%d", family->repetitions_);
if (family->use_manual_time_) {
instance.name += "/manual_time";
@@ -201,11 +193,10 @@ bool BenchmarkFamilies::FindBenchmarks(
// Add the number of threads used to the name
if (!family->thread_counts_.empty()) {
instance.name += StrFormat("/threads:%d", instance.threads);
instance.name += StringPrintF("/threads:%d", instance.threads);
}
if ((re.Match(instance.name) && !isNegativeFilter) ||
(!re.Match(instance.name) && isNegativeFilter)) {
if (re.Match(instance.name)) {
instance.last_benchmark_instance = (&args == &family->args_.back());
benchmarks->push_back(std::move(instance));
}
@@ -253,7 +244,30 @@ Benchmark::Benchmark(const char* name)
Benchmark::~Benchmark() {}
Benchmark* Benchmark::Arg(int64_t x) {
void Benchmark::AddRange(std::vector<int>* dst, int lo, int hi, int mult) {
CHECK_GE(lo, 0);
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
// Add "lo"
dst->push_back(lo);
static const int kint32max = std::numeric_limits<int32_t>::max();
// Now space out the benchmarks in multiples of "mult"
for (int32_t i = 1; i < kint32max / mult; i *= mult) {
if (i >= hi) break;
if (i > lo) {
dst->push_back(i);
}
}
// Add "hi" (if different from "lo")
if (hi != lo) {
dst->push_back(hi);
}
}
Benchmark* Benchmark::Arg(int x) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
return this;
@@ -264,21 +278,20 @@ Benchmark* Benchmark::Unit(TimeUnit unit) {
return this;
}
Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
Benchmark* Benchmark::Range(int start, int limit) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
std::vector<int64_t> arglist;
std::vector<int> arglist;
AddRange(&arglist, start, limit, range_multiplier_);
for (int64_t i : arglist) {
for (int i : arglist) {
args_.push_back({i});
}
return this;
}
Benchmark* Benchmark::Ranges(
const std::vector<std::pair<int64_t, int64_t>>& ranges) {
Benchmark* Benchmark::Ranges(const std::vector<std::pair<int, int>>& ranges) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
std::vector<std::vector<int64_t>> arglists(ranges.size());
std::vector<std::vector<int>> arglists(ranges.size());
std::size_t total = 1;
for (std::size_t i = 0; i < ranges.size(); i++) {
AddRange(&arglists[i], ranges[i].first, ranges[i].second,
@@ -289,7 +302,7 @@ Benchmark* Benchmark::Ranges(
std::vector<std::size_t> ctr(arglists.size(), 0);
for (std::size_t i = 0; i < total; i++) {
std::vector<int64_t> tmp;
std::vector<int> tmp;
tmp.reserve(arglists.size());
for (std::size_t j = 0; j < arglists.size(); j++) {
@@ -321,17 +334,17 @@ Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
return this;
}
Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
Benchmark* Benchmark::DenseRange(int start, int limit, int step) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
CHECK_GE(start, 0);
CHECK_LE(start, limit);
for (int64_t arg = start; arg <= limit; arg += step) {
for (int arg = start; arg <= limit; arg += step) {
args_.push_back({arg});
}
return this;
}
Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
Benchmark* Benchmark::Args(const std::vector<int>& args) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
args_.push_back(args);
return this;
@@ -348,6 +361,7 @@ Benchmark* Benchmark::RangeMultiplier(int multiplier) {
return this;
}
Benchmark* Benchmark::MinTime(double t) {
CHECK(t > 0.0);
CHECK(iterations_ == 0);
@@ -355,6 +369,7 @@ Benchmark* Benchmark::MinTime(double t) {
return this;
}
Benchmark* Benchmark::Iterations(size_t n) {
CHECK(n > 0);
CHECK(IsZero(min_time_));
@@ -1,33 +0,0 @@
#ifndef BENCHMARK_REGISTER_H
#define BENCHMARK_REGISTER_H
#include <vector>
#include "check.h"
template <typename T>
void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
CHECK_GE(lo, 0);
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
// Add "lo"
dst->push_back(lo);
static const T kmax = std::numeric_limits<T>::max();
// Now space out the benchmarks in multiples of "mult"
for (T i = 1; i < kmax / mult; i *= mult) {
if (i >= hi) break;
if (i > lo) {
dst->push_back(i);
}
}
// Add "hi" (if different from "lo")
if (hi != lo) {
dst->push_back(hi);
}
}
#endif // BENCHMARK_REGISTER_H
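To see what this range expansion produces, here is a runnable sketch of the same logic (the CHECK_* argument validation is omitted), using the library's default range multiplier of 8:
```cpp
#include <cstdint>
#include <cstdio>
#include <limits>
#include <vector>

// Same logic as AddRange above: emit lo, then every power of mult strictly
// between lo and hi, then hi.
template <typename T>
void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
  dst->push_back(lo);
  static const T kmax = std::numeric_limits<T>::max();
  for (T i = 1; i < kmax / mult; i *= mult) {
    if (i >= hi) break;
    if (i > lo) dst->push_back(i);
  }
  if (hi != lo) dst->push_back(hi);
}

int main() {
  std::vector<int64_t> args;
  AddRange<int64_t>(&args, 8, 8192, 8);  // what Range(8, 8192) expands to
  for (int64_t a : args) std::printf("%lld ", static_cast<long long>(a));
  std::printf("\n");  // prints: 8 64 512 4096 8192
}
```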
benchmarks/thirdparty/benchmark/src/check.h vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/colorprint.cc vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/colorprint.h vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/commandlineflags.cc vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/commandlineflags.h vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/complexity.cc vendored Executable file → Normal file
@@ -28,18 +28,18 @@ namespace benchmark {
BigOFunc* FittingCurve(BigO complexity) {
switch (complexity) {
case oN:
return [](int64_t n) -> double { return static_cast<double>(n); };
return [](int n) -> double { return n; };
case oNSquared:
return [](int64_t n) -> double { return std::pow(n, 2); };
return [](int n) -> double { return std::pow(n, 2); };
case oNCubed:
return [](int64_t n) -> double { return std::pow(n, 3); };
return [](int n) -> double { return std::pow(n, 3); };
case oLogN:
return [](int64_t n) { return log2(n); };
return [](int n) { return log2(n); };
case oNLogN:
return [](int64_t n) { return n * log2(n); };
return [](int n) { return n * log2(n); };
case o1:
default:
return [](int64_t) { return 1.0; };
return [](int) { return 1.0; };
}
}
@@ -65,15 +65,15 @@ std::string GetBigOString(BigO complexity) {
// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error, for the fitting curve
// given by the lambda expression.
// given by the lambda expresion.
// - n : Vector containing the size of the benchmark tests.
// - time : Vector containing the times for the benchmark tests.
// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).
// - fitting_curve : lambda expresion (e.g. [](int n) {return n; };).
// For a deeper explanation on the algorithm logic, look the README file at
// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
LeastSq MinimalLeastSq(const std::vector<int>& n,
const std::vector<double>& time,
BigOFunc* fitting_curve) {
double sigma_gn = 0.0;
@@ -117,7 +117,7 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
// - complexity : If different than oAuto, the fitting curve will stick to
// this one. If it is oAuto, it will be calculated the best
// fitting curve.
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
LeastSq MinimalLeastSq(const std::vector<int>& n,
const std::vector<double>& time, const BigO complexity) {
CHECK_EQ(n.size(), time.size());
CHECK_GE(n.size(), 2); // Do not compute fitting curve is less than two
@@ -157,7 +157,7 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
if (reports.size() < 2) return results;
// Accumulators.
std::vector<int64_t> n;
std::vector<int> n;
std::vector<double> real_time;
std::vector<double> cpu_time;
benchmarks/thirdparty/benchmark/src/complexity.h vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/console_reporter.cc vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/counter.cc vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/counter.h vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/csv_reporter.cc vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/cycleclock.h vendored Executable file → Normal file
@@ -159,11 +159,6 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__) // Covers both s390 and s390x.
// Return the CPU clock.
uint64_t tsc;
asm("stck %0" : "=Q" (tsc) : : "cc");
return tsc;
#else
// The soft failover to a generic implementation is automatic only for ARM.
// For other platforms the developer is expected to make an attempt to create
benchmarks/thirdparty/benchmark/src/internal_macros.h vendored Executable file → Normal file
@@ -39,7 +39,6 @@
#elif defined(_WIN32)
#define BENCHMARK_OS_WINDOWS 1
#elif defined(__APPLE__)
#define BENCHMARK_OS_APPLE 1
#include "TargetConditionals.h"
#if defined(TARGET_OS_MAC)
#define BENCHMARK_OS_MACOSX 1
@@ -51,20 +50,14 @@
#define BENCHMARK_OS_FREEBSD 1
#elif defined(__NetBSD__)
#define BENCHMARK_OS_NETBSD 1
#elif defined(__OpenBSD__)
#define BENCHMARK_OS_OPENBSD 1
#elif defined(__linux__)
#define BENCHMARK_OS_LINUX 1
#elif defined(__native_client__)
#define BENCHMARK_OS_NACL 1
#elif defined(__EMSCRIPTEN__)
#elif defined(EMSCRIPTEN)
#define BENCHMARK_OS_EMSCRIPTEN 1
#elif defined(__rtems__)
#define BENCHMARK_OS_RTEMS 1
#elif defined(__Fuchsia__)
#define BENCHMARK_OS_FUCHSIA 1
#elif defined (__SVR4) && defined (__sun)
#define BENCHMARK_OS_SOLARIS 1
#endif
#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
benchmarks/thirdparty/benchmark/src/json_reporter.cc vendored Executable file → Normal file
@@ -32,15 +32,15 @@ namespace benchmark {
namespace {
std::string FormatKV(std::string const& key, std::string const& value) {
return StrFormat("\"%s\": \"%s\"", key.c_str(), value.c_str());
return StringPrintF("\"%s\": \"%s\"", key.c_str(), value.c_str());
}
std::string FormatKV(std::string const& key, const char* value) {
return StrFormat("\"%s\": \"%s\"", key.c_str(), value);
return StringPrintF("\"%s\": \"%s\"", key.c_str(), value);
}
std::string FormatKV(std::string const& key, bool value) {
return StrFormat("\"%s\": %s", key.c_str(), value ? "true" : "false");
return StringPrintF("\"%s\": %s", key.c_str(), value ? "true" : "false");
}
std::string FormatKV(std::string const& key, int64_t value) {
@@ -77,10 +77,6 @@ bool JSONReporter::ReportContext(const Context& context) {
std::string walltime_value = LocalDateTimeString();
out << indent << FormatKV("date", walltime_value) << ",\n";
if (Context::executable_name) {
out << indent << FormatKV("executable", Context::executable_name) << ",\n";
}
CPUInfo const& info = context.cpu_info;
out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus))
<< ",\n";
benchmarks/thirdparty/benchmark/src/log.h vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/mutex.h vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/re.h vendored Executable file → Normal file
@@ -17,31 +17,19 @@
#include "internal_macros.h"
#if !defined(HAVE_STD_REGEX) && \
!defined(HAVE_GNU_POSIX_REGEX) && \
!defined(HAVE_POSIX_REGEX)
// No explicit regex selection; detect based on builtin hints.
#if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE)
#define HAVE_POSIX_REGEX 1
#elif __cplusplus >= 199711L
#define HAVE_STD_REGEX 1
#endif
#endif
// Prefer C regex libraries when compiling w/o exceptions so that we can
// correctly report errors.
#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \
defined(BENCHMARK_HAVE_STD_REGEX) && \
#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && defined(HAVE_STD_REGEX) && \
(defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
#undef HAVE_STD_REGEX
#undef HAVE_STD_REGEX
#endif
#if defined(HAVE_STD_REGEX)
#include <regex>
#include <regex>
#elif defined(HAVE_GNU_POSIX_REGEX)
#include <gnuregex.h>
#include <gnuregex.h>
#elif defined(HAVE_POSIX_REGEX)
#include <regex.h>
#include <regex.h>
#else
#error No regular expression backend was found!
#endif
@@ -76,7 +64,7 @@ class Regex {
#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
regex_t re_;
#else
#error No regular expression backend implementation available
#error No regular expression backend implementation available
#endif
};
benchmarks/thirdparty/benchmark/src/reporter.cc vendored Executable file → Normal file
@@ -37,9 +37,6 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Out << LocalDateTimeString() << "\n";
if (context.executable_name)
Out << "Running " << context.executable_name << "\n";
const CPUInfo &info = context.cpu_info;
Out << "Run on (" << info.num_cpus << " X "
<< (info.cycles_per_second / 1000000.0) << " MHz CPU "
@@ -67,9 +64,6 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
#endif
}
// No initializer because it's already initialized to NULL.
const char* BenchmarkReporter::Context::executable_name;
BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {}
double BenchmarkReporter::Run::GetAdjustedRealTime() const {
benchmarks/thirdparty/benchmark/src/sleep.cc vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/sleep.h vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/statistics.cc vendored Executable file → Normal file
@@ -30,25 +30,22 @@ auto StatisticsSum = [](const std::vector<double>& v) {
};
double StatisticsMean(const std::vector<double>& v) {
if (v.empty()) return 0.0;
if (v.size() == 0) return 0.0;
return StatisticsSum(v) * (1.0 / v.size());
}
double StatisticsMedian(const std::vector<double>& v) {
if (v.size() < 3) return StatisticsMean(v);
std::vector<double> copy(v);
auto center = copy.begin() + v.size() / 2;
std::nth_element(copy.begin(), center, copy.end());
// did we have an odd number of samples?
// if yes, then center is the median
// it no, then we are looking for the average between center and the value before
std::vector<double> partial;
// we need roundDown(count/2)+1 slots
partial.resize(1 + (v.size() / 2));
std::partial_sort_copy(v.begin(), v.end(), partial.begin(), partial.end());
// did we have odd number of samples?
// if yes, then the last element of partially-sorted vector is the median
// it no, then the average of the last two elements is the median
if(v.size() % 2 == 1)
return *center;
auto center2 = copy.begin() + v.size() / 2 - 1;
std::nth_element(copy.begin(), center2, copy.end());
return (*center + *center2) / 2.0;
return partial.back();
return (partial[partial.size() - 2] + partial[partial.size() - 1]) / 2.0;
}
// Return the sum of the squares of this sample set
@@ -65,7 +62,7 @@ auto Sqrt = [](const double dat) {
double StatisticsStdDev(const std::vector<double>& v) {
const auto mean = StatisticsMean(v);
if (v.empty()) return mean;
if (v.size() == 0) return mean;
// Sample standard deviation is undefined for n = 1
if (v.size() == 1)

benchmarks/thirdparty/benchmark/src/statistics.h vendored Executable file → Normal file
benchmarks/thirdparty/benchmark/src/string_util.cc vendored Executable file → Normal file
return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
}
std::string StrFormatImp(const char* msg, va_list args) {
std::string StringPrintFImp(const char* msg, va_list args) {
// we might need a second shot at this, so pre-emptivly make a copy
va_list args_cp;
va_copy(args_cp, args);
@@ -152,10 +152,10 @@ std::string StrFormatImp(const char* msg, va_list args) {
return std::string(buff_ptr.get());
}
std::string StrFormat(const char* format, ...) {
std::string StringPrintF(const char* format, ...) {
va_list args;
va_start(args, format);
std::string tmp = StrFormatImp(format, args);
std::string tmp = StringPrintFImp(format, args);
va_end(args);
return tmp;
}
benchmarks/thirdparty/benchmark/src/string_util.h vendored Executable file → Normal file
@@ -12,23 +12,23 @@ void AppendHumanReadable(int n, std::string* str);
std::string HumanReadableNumber(double n, double one_k = 1024.0);
std::string StrFormat(const char* format, ...);
std::string StringPrintF(const char* format, ...);
inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
inline std::ostream& StringCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
return out;
}
template <class First, class... Rest>
inline std::ostream& StrCatImp(std::ostream& out, First&& f,
inline std::ostream& StringCatImp(std::ostream& out, First&& f,
Rest&&... rest) {
out << std::forward<First>(f);
return StrCatImp(out, std::forward<Rest>(rest)...);
return StringCatImp(out, std::forward<Rest>(rest)...);
}
template <class... Args>
inline std::string StrCat(Args&&... args) {
std::ostringstream ss;
StrCatImp(ss, std::forward<Args>(args)...);
StringCatImp(ss, std::forward<Args>(args)...);
return ss.str();
}
benchmarks/thirdparty/benchmark/src/sysinfo.cc vendored Executable file → Normal file
@@ -16,26 +16,20 @@
#ifdef BENCHMARK_OS_WINDOWS
#include <Shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <VersionHelpers.h>
#include <Windows.h>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD
defined BENCHMARK_OS_NETBSD
#define BENCHMARK_HAS_SYSCTL
#include <sys/sysctl.h>
#endif
#endif
#if defined(BENCHMARK_OS_SOLARIS)
#include <kstat.h>
#endif
#include <algorithm>
#include <array>
@@ -136,26 +130,6 @@ struct ValueUnion {
};
ValueUnion GetSysctlImp(std::string const& Name) {
#if defined BENCHMARK_OS_OPENBSD
int mib[2];
mib[0] = CTL_HW;
if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){
ValueUnion buff(sizeof(int));
if (Name == "hw.ncpu") {
mib[1] = HW_NCPU;
} else {
mib[1] = HW_CPUSPEED;
}
if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) {
return ValueUnion();
}
return buff;
}
return ValueUnion();
#else
size_t CurBuffSize = 0;
if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1)
return ValueUnion();
@@ -164,7 +138,6 @@ ValueUnion GetSysctlImp(std::string const& Name) {
if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0)
return buff;
return ValueUnion();
#endif
}
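The retained branch above follows the standard size-then-read sysctl protocol. A sketch of just that handshake (BSD/macOS only; ReadSysctl is a hypothetical helper): the first sysctlbyname call passes a null buffer so the kernel reports the required size, and the second call fills the buffer.

#include <sys/types.h>
#include <sys/sysctl.h>

#include <string>
#include <vector>

bool ReadSysctl(const std::string& name, std::vector<char>* out) {
  size_t size = 0;
  // Pass 1: null buffer; the kernel writes the required size into 'size'.
  if (sysctlbyname(name.c_str(), nullptr, &size, nullptr, 0) == -1)
    return false;
  out->resize(size);
  // Pass 2: read the value; 'size' is updated to the bytes actually written.
  return sysctlbyname(name.c_str(), out->data(), &size, nullptr, 0) == 0;
}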
BENCHMARK_MAYBE_UNUSED
@@ -330,7 +303,7 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
if (!B.test(0)) continue;
CInfo* Cache = &it->Cache;
CPUInfo::CacheInfo C;
C.num_sharing = static_cast<int>(B.count());
C.num_sharing = B.count();
C.level = Cache->Level;
C.size = Cache->Size;
switch (Cache->Type) {
@@ -381,15 +354,6 @@ int GetNumCPUs() {
return sysinfo.dwNumberOfProcessors; // number of logical
// processors in the current
// group
#elif defined(BENCHMARK_OS_SOLARIS)
// Returns -1 in case of a failure.
int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
if (NumCPU < 0) {
fprintf(stderr,
"sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
strerror(errno));
}
return NumCPU;
#else
int NumCPUs = 0;
int MaxID = -1;
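The removed Solaris branch relied on the portable POSIX query rather than a sysctl. A sketch of that call (OnlineCpus is a hypothetical helper):

#include <unistd.h>

#include <cstdio>

int OnlineCpus() {
  // _SC_NPROCESSORS_ONLN reports online logical processors, -1 on failure.
  const long n = sysconf(_SC_NPROCESSORS_ONLN);
  if (n < 0) std::perror("sysconf(_SC_NPROCESSORS_ONLN)");
  return static_cast<int>(n);
}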
@@ -477,7 +441,7 @@ double GetCPUCyclesPerSecond() {
std::string value;
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
// When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
// accept positive values. Some environments (virtual machines) report zero,
// accept postive values. Some environments (virtual machines) report zero,
// which would cause infinite looping in WallTime_Init.
if (startsWithKey(ln, "cpu MHz")) {
if (!value.empty()) {
@@ -509,17 +473,12 @@ double GetCPUCyclesPerSecond() {
constexpr auto* FreqStr =
#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
"machdep.tsc_freq";
#elif defined BENCHMARK_OS_OPENBSD
"hw.cpuspeed";
#else
"hw.cpufrequency";
#endif
unsigned long long hz = 0;
#if defined BENCHMARK_OS_OPENBSD
if (GetSysctl(FreqStr, &hz)) return hz * 1000000;
#else
if (GetSysctl(FreqStr, &hz)) return hz;
#endif
fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
FreqStr, strerror(errno));
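The special-casing above is purely about units: OpenBSD's hw.cpuspeed reports megahertz, while machdep.tsc_freq and hw.cpufrequency report hertz, hence the extra * 1000000 only on the OpenBSD path. As a one-line sketch (ToHz is a hypothetical helper):

unsigned long long ToHz(unsigned long long value, bool value_is_mhz) {
  return value_is_mhz ? value * 1000000ULL : value;  // MHz -> Hz if needed
}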
@@ -534,35 +493,6 @@ double GetCPUCyclesPerSecond() {
"~MHz", nullptr, &data, &data_size)))
return static_cast<double>((int64_t)data *
(int64_t)(1000 * 1000)); // was mhz
#elif defined (BENCHMARK_OS_SOLARIS)
kstat_ctl_t *kc = kstat_open();
if (!kc) {
std::cerr << "failed to open /dev/kstat\n";
return -1;
}
kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0");
if (!ksp) {
std::cerr << "failed to lookup in /dev/kstat\n";
return -1;
}
if (kstat_read(kc, ksp, NULL) < 0) {
std::cerr << "failed to read from /dev/kstat\n";
return -1;
}
kstat_named_t *knp =
(kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz");
if (!knp) {
std::cerr << "failed to lookup data in /dev/kstat\n";
return -1;
}
if (knp->data_type != KSTAT_DATA_UINT64) {
std::cerr << "current_clock_Hz is of unexpected data type: "
<< knp->data_type << "\n";
return -1;
}
double clock_hz = knp->value.ui64;
kstat_close(kc);
return clock_hz;
#endif
// If we've fallen through, attempt to roughly estimate the CPU clock rate.
const int estimate_time_ms = 1000;

benchmarks/thirdparty/benchmark/src/thread_manager.h vendored

@@ -1,66 +0,0 @@
#ifndef BENCHMARK_THREAD_MANAGER_H
#define BENCHMARK_THREAD_MANAGER_H
#include <atomic>
#include "benchmark/benchmark.h"
#include "mutex.h"
namespace benchmark {
namespace internal {
class ThreadManager {
public:
ThreadManager(int num_threads)
: alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
return benchmark_mutex_;
}
bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
return start_stop_barrier_.wait();
}
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
start_stop_barrier_.removeThread();
if (--alive_threads_ == 0) {
MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
}
}
void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(),
[this]() { return alive_threads_ == 0; });
}
public:
struct Result {
int64_t iterations = 0;
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
int64_t bytes_processed = 0;
int64_t items_processed = 0;
int64_t complexity_n = 0;
std::string report_label_;
std::string error_message_;
bool has_error_ = false;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;
private:
mutable Mutex benchmark_mutex_;
std::atomic<int> alive_threads_;
Barrier start_stop_barrier_;
Mutex end_cond_mutex_;
Condition end_condition_;
};
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_THREAD_MANAGER_H
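ThreadManager's shutdown path above is a countdown-plus-condition-variable handshake: every worker decrements alive_threads_, and the last one signals end_condition_ while the coordinator blocks in WaitForAllThreads. A minimal sketch of the same pattern with standard primitives (Latch is a hypothetical name; the vendored code uses its own annotated Mutex/Condition wrappers):

#include <atomic>
#include <condition_variable>
#include <mutex>

class Latch {
 public:
  explicit Latch(int n) : count_(n) {}

  void CountDown() {  // called by each worker as it finishes
    if (--count_ == 0) {
      std::lock_guard<std::mutex> lock(mu_);  // pairs with the waiter's lock
      cv_.notify_all();
    }
  }

  void Wait() {  // called by the coordinator
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return count_.load() == 0; });
  }

 private:
  std::atomic<int> count_;
  std::mutex mu_;
  std::condition_variable cv_;
};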

benchmarks/thirdparty/benchmark/src/thread_timer.h vendored

@@ -1,69 +0,0 @@
#ifndef BENCHMARK_THREAD_TIMER_H
#define BENCHMARK_THREAD_TIMER_H
#include "check.h"
#include "timers.h"
namespace benchmark {
namespace internal {
class ThreadTimer {
public:
ThreadTimer() = default;
// Called by each thread
void StartTimer() {
running_ = true;
start_real_time_ = ChronoClockNow();
start_cpu_time_ = ThreadCPUUsage();
}
// Called by each thread
void StopTimer() {
CHECK(running_);
running_ = false;
real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative
// time. Guard against that.
cpu_time_used_ += std::max<double>(ThreadCPUUsage() - start_cpu_time_, 0);
}
// Called by each thread
void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
bool running() const { return running_; }
// REQUIRES: timer is not running
double real_time_used() {
CHECK(!running_);
return real_time_used_;
}
// REQUIRES: timer is not running
double cpu_time_used() {
CHECK(!running_);
return cpu_time_used_;
}
// REQUIRES: timer is not running
double manual_time_used() {
CHECK(!running_);
return manual_time_used_;
}
private:
bool running_ = false; // Is the timer running
double start_real_time_ = 0; // If running_
double start_cpu_time_ = 0; // If running_
// Accumulated time so far (does not contain current slice if running_)
double real_time_used_ = 0;
double cpu_time_used_ = 0;
// Manually set iteration time. User sets this with SetIterationTime(seconds).
double manual_time_used_ = 0;
};
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_THREAD_TIMER_H
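ThreadTimer above accumulates finished slices: StartTimer records the clocks, StopTimer adds the elapsed slice to a running total, and the accessors check the timer is stopped so a half-open slice is never reported. The same shape with std::chrono standing in for ChronoClockNow (StopwatchAccum is a hypothetical name):

#include <cassert>
#include <chrono>

class StopwatchAccum {
 public:
  void Start() {
    running_ = true;
    start_ = std::chrono::steady_clock::now();
  }
  void Stop() {
    assert(running_);
    running_ = false;
    elapsed_ += std::chrono::steady_clock::now() - start_;  // add the slice
  }
  double Seconds() const {  // REQUIRES: timer is not running, as above
    assert(!running_);
    return std::chrono::duration<double>(elapsed_).count();
  }

 private:
  bool running_ = false;
  std::chrono::steady_clock::time_point start_{};
  std::chrono::steady_clock::duration elapsed_{};
};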

benchmarks/thirdparty/benchmark/src/timers.cc vendored Executable file → Normal file

@@ -17,14 +17,11 @@
#ifdef BENCHMARK_OS_WINDOWS
#include <Shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <VersionHelpers.h>
#include <Windows.h>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
@@ -77,7 +74,7 @@ double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
static_cast<double>(user.QuadPart)) *
1e-7;
}
#elif !defined(BENCHMARK_OS_FUCHSIA)
#else
double MakeTime(struct rusage const& ru) {
return (static_cast<double>(ru.ru_utime.tv_sec) +
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
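The constants in MakeTime above are unit conversions: Windows FILETIME counts 100-nanosecond ticks (hence the 1e-7), and rusage splits CPU time into seconds plus microseconds (hence the 1e-6). Spelled out with hypothetical helper names:

#include <cstdint>

double FiletimeTicksToSeconds(std::uint64_t ticks) {
  return static_cast<double>(ticks) * 1e-7;  // 100 ns per tick
}

double TimevalToSeconds(std::int64_t sec, std::int64_t usec) {
  return static_cast<double>(sec) + static_cast<double>(usec) * 1e-6;
}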
@@ -165,10 +162,6 @@ double ThreadCPUUsage() {
// RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
// https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_SOLARIS)
struct rusage ru;
if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
#elif defined(CLOCK_THREAD_CPUTIME_ID)
struct timespec ts;
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
@@ -193,6 +186,7 @@ std::string DateTimeString(bool local) {
std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
#else
std::tm timeinfo;
std::memset(&timeinfo, 0, sizeof(std::tm));
::localtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
@@ -201,6 +195,7 @@ std::string DateTimeString(bool local) {
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
#else
std::tm timeinfo;
std::memset(&timeinfo, 0, sizeof(std::tm));
::gmtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
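The memset lines added above zero-initialize the std::tm before localtime_r/gmtime_r fill it, so fields the conversion leaves alone never reach strftime uninitialized. The complete pattern as a sketch (POSIX; LocalTimestamp is a hypothetical helper):

#include <cstring>
#include <ctime>
#include <string>

std::string LocalTimestamp() {
  const std::time_t now = std::time(nullptr);
  std::tm timeinfo;
  std::memset(&timeinfo, 0, sizeof(timeinfo));  // no uninitialized fields
  ::localtime_r(&now, &timeinfo);
  char buf[64];
  const std::size_t written =
      std::strftime(buf, sizeof(buf), "%F %T", &timeinfo);
  return std::string(buf, written);
}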

benchmarks/thirdparty/benchmark/src/timers.h vendored Executable file → Normal file

benchmarks/thirdparty/benchmark/tools/compare.py vendored

@@ -1,316 +0,0 @@
#!/usr/bin/env python
"""
compare.py - versatile benchmark output compare tool
"""
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *
def check_inputs(in1, in2, flags):
"""
Perform checking on the user provided inputs and diagnose any abnormalities
"""
in1_kind, in1_err = classify_input_file(in1)
in2_kind, in2_err = classify_input_file(in2)
output_file = find_benchmark_flag('--benchmark_out=', flags)
output_type = find_benchmark_flag('--benchmark_out_format=', flags)
if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
print(("WARNING: '--benchmark_out=%s' will be passed to both "
"benchmarks causing it to be overwritten") % output_file)
if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
print("WARNING: passing optional flags has no effect since both "
"inputs are JSON")
if output_type is not None and output_type != 'json':
print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
" is not supported.") % output_type)
sys.exit(1)
def create_parser():
parser = ArgumentParser(
description='versatile benchmark output compare tool')
subparsers = parser.add_subparsers(
help='This tool has multiple modes of operation:',
dest='mode')
parser_a = subparsers.add_parser(
'benchmarks',
help='The most simple use-case, compare all the output of these two benchmarks')
baseline = parser_a.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
contender = parser_a.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
parser_a.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_b = subparsers.add_parser(
'filters', help='Compare filter one with the filter two of benchmark')
baseline = parser_b.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test',
metavar='test',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
help='The first filter, that will be used as baseline')
contender = parser_b.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
help='The second filter, that will be compared against the baseline')
parser_b.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_c = subparsers.add_parser(
'benchmarksfiltered',
help='Compare filter one of first benchmark with filter two of the second benchmark')
baseline = parser_c.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
help='The first filter, that will be used as baseline')
contender = parser_c.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='The second benchmark executable or JSON output file, that will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
help='The second filter, that will be compared against the baseline')
parser_c.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
return parser
def main():
# Parse the command line flags
parser = create_parser()
args, unknown_args = parser.parse_known_args()
if args.mode is None:
parser.print_help()
exit(1)
assert not unknown_args
benchmark_options = args.benchmark_options
if args.mode == 'benchmarks':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = ''
filter_contender = ''
# NOTE: if test_baseline == test_contender, you are analyzing the stdev
description = 'Comparing %s to %s' % (test_baseline, test_contender)
elif args.mode == 'filters':
test_baseline = args.test[0].name
test_contender = args.test[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if filter_baseline == filter_contender, you are analyzing the
# stdev
description = 'Comparing %s to %s (from %s)' % (
filter_baseline, filter_contender, args.test[0].name)
elif args.mode == 'benchmarksfiltered':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if test_baseline == test_contender and
# filter_baseline == filter_contender, you are analyzing the stdev
description = 'Comparing %s (from %s) to %s (from %s)' % (
filter_baseline, test_baseline, filter_contender, test_contender)
else:
# should never happen
print("Unrecognized mode of operation: '%s'" % args.mode)
parser.print_help()
exit(1)
check_inputs(test_baseline, test_contender, benchmark_options)
options_baseline = []
options_contender = []
if filter_baseline and filter_contender:
options_baseline = ['--benchmark_filter=%s' % filter_baseline]
options_contender = ['--benchmark_filter=%s' % filter_contender]
# Run the benchmarks and report the results
json1 = json1_orig = gbench.util.run_or_load_benchmark(
test_baseline, benchmark_options + options_baseline)
json2 = json2_orig = gbench.util.run_or_load_benchmark(
test_contender, benchmark_options + options_contender)
# Now, filter the benchmarks so that the difference report can work
if filter_baseline and filter_contender:
replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
json1 = gbench.report.filter_benchmark(
json1_orig, filter_baseline, replacement)
json2 = gbench.report.filter_benchmark(
json2_orig, filter_contender, replacement)
# Diff and output
output_lines = gbench.report.generate_difference_report(json1, json2)
print(description)
for ln in output_lines:
print(ln)
import unittest
class TestParser(unittest.TestCase):
def setUp(self):
self.parser = create_parser()
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'gbench',
'Inputs')
self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
self.testInput1 = os.path.join(testInputs, 'test1_run2.json')
def test_benchmarks_basic(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1])
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, 'd'])
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['d'])
def test_benchmarks_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_basic(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd'])
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertFalse(parsed.benchmark_options)
def test_filters_with_remainder(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', 'e'])
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', '--', 'f'])
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['f'])
def test_benchmarksfiltered_basic(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertFalse(parsed.benchmark_options)
def test_benchmarksfiltered_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'f')
def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'g')
if __name__ == '__main__':
# unittest.main()
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;

benchmarks/thirdparty/benchmark/tools/compare_bench.py vendored

@@ -1,67 +0,0 @@
#!/usr/bin/env python
"""
compare_bench.py - Compare two benchmarks or their results and report the
difference.
"""
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *
def check_inputs(in1, in2, flags):
"""
Perform checking on the user provided inputs and diagnose any abnormalities
"""
in1_kind, in1_err = classify_input_file(in1)
in2_kind, in2_err = classify_input_file(in2)
output_file = find_benchmark_flag('--benchmark_out=', flags)
output_type = find_benchmark_flag('--benchmark_out_format=', flags)
if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
print(("WARNING: '--benchmark_out=%s' will be passed to both "
"benchmarks causing it to be overwritten") % output_file)
if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
print("WARNING: passing --benchmark flags has no effect since both "
"inputs are JSON")
if output_type is not None and output_type != 'json':
print(("ERROR: passing '--benchmark_out_format=%s' to 'compare_bench.py`"
" is not supported.") % output_type)
sys.exit(1)
def main():
parser = ArgumentParser(
description='compare the results of two benchmarks')
parser.add_argument(
'test1', metavar='test1', type=str, nargs=1,
help='A benchmark executable or JSON output file')
parser.add_argument(
'test2', metavar='test2', type=str, nargs=1,
help='A benchmark executable or JSON output file')
parser.add_argument(
'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables'
)
args, unknown_args = parser.parse_known_args()
# Parse the command line flags
test1 = args.test1[0]
test2 = args.test2[0]
if unknown_args:
# should never happen
print("Unrecognized positional argument arguments: '%s'"
% unknown_args)
exit(1)
benchmark_options = args.benchmark_options
check_inputs(test1, test2, benchmark_options)
# Run the benchmarks and report the results
json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options)
output_lines = gbench.report.generate_difference_report(json1, json2)
print('Comparing %s to %s' % (test1, test2))
for ln in output_lines:
print(ln)
if __name__ == '__main__':
main()

benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run1.json vendored

@@ -1,102 +0,0 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_SameTimes",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_2xFaster",
"iterations": 1000,
"real_time": 50,
"cpu_time": 50,
"time_unit": "ns"
},
{
"name": "BM_2xSlower",
"iterations": 1000,
"real_time": 50,
"cpu_time": 50,
"time_unit": "ns"
},
{
"name": "BM_1PercentFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_1PercentSlower",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentSlower",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_100xSlower",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_100xFaster",
"iterations": 1000,
"real_time": 10000,
"cpu_time": 10000,
"time_unit": "ns"
},
{
"name": "BM_10PercentCPUToTime",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_ThirdFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_BadTimeUnit",
"iterations": 1000,
"real_time": 0.4,
"cpu_time": 0.5,
"time_unit": "s"
},
{
"name": "BM_DifferentTimeUnit",
"iterations": 1,
"real_time": 1,
"cpu_time": 1,
"time_unit": "s"
}
]
}

benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test1_run2.json vendored

@@ -1,102 +0,0 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_SameTimes",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_2xFaster",
"iterations": 1000,
"real_time": 25,
"cpu_time": 25,
"time_unit": "ns"
},
{
"name": "BM_2xSlower",
"iterations": 20833333,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_1PercentFaster",
"iterations": 1000,
"real_time": 98.9999999,
"cpu_time": 98.9999999,
"time_unit": "ns"
},
{
"name": "BM_1PercentSlower",
"iterations": 1000,
"real_time": 100.9999999,
"cpu_time": 100.9999999,
"time_unit": "ns"
},
{
"name": "BM_10PercentFaster",
"iterations": 1000,
"real_time": 90,
"cpu_time": 90,
"time_unit": "ns"
},
{
"name": "BM_10PercentSlower",
"iterations": 1000,
"real_time": 110,
"cpu_time": 110,
"time_unit": "ns"
},
{
"name": "BM_100xSlower",
"iterations": 1000,
"real_time": 1.0000e+04,
"cpu_time": 1.0000e+04,
"time_unit": "ns"
},
{
"name": "BM_100xFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentCPUToTime",
"iterations": 1000,
"real_time": 110,
"cpu_time": 90,
"time_unit": "ns"
},
{
"name": "BM_ThirdFaster",
"iterations": 1000,
"real_time": 66.665,
"cpu_time": 66.664,
"time_unit": "ns"
},
{
"name": "BM_BadTimeUnit",
"iterations": 1000,
"real_time": 0.04,
"cpu_time": 0.6,
"time_unit": "s"
},
{
"name": "BM_DifferentTimeUnit",
"iterations": 1,
"real_time": 1,
"cpu_time": 1,
"time_unit": "ns"
}
]
}
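These two fixtures are deliberate pairs: the second run halves BM_2xFaster (50 ns to 25 ns), doubles BM_2xSlower, nudges the 1-percent cases, and so on, giving the difference report known expected outputs. A sketch of the relative-change arithmetic they exercise (RelativeChange is a hypothetical C++ stand-in for the tool's Python implementation; it assumes a nonzero baseline):

#include <cmath>

double RelativeChange(double baseline, double contender) {
  // BM_2xFaster: (25 - 50) / 50 = -0.5; BM_2xSlower: (100 - 50) / 50 = +1.0
  return (contender - baseline) / std::fabs(baseline);
}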

benchmarks/thirdparty/benchmark/tools/gbench/Inputs/test2_run.json vendored

@@ -1,81 +0,0 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_Hi",
"iterations": 1234,
"real_time": 42,
"cpu_time": 24,
"time_unit": "ms"
},
{
"name": "BM_Zero",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_Zero/4",
"iterations": 4000,
"real_time": 40,
"cpu_time": 40,
"time_unit": "ns"
},
{
"name": "Prefix/BM_Zero",
"iterations": 2000,
"real_time": 20,
"cpu_time": 20,
"time_unit": "ns"
},
{
"name": "Prefix/BM_Zero/3",
"iterations": 3000,
"real_time": 30,
"cpu_time": 30,
"time_unit": "ns"
},
{
"name": "BM_One",
"iterations": 5000,
"real_time": 5,
"cpu_time": 5,
"time_unit": "ns"
},
{
"name": "BM_One/4",
"iterations": 2000,
"real_time": 20,
"cpu_time": 20,
"time_unit": "ns"
},
{
"name": "Prefix/BM_One",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "Prefix/BM_One/3",
"iterations": 1500,
"real_time": 15,
"cpu_time": 15,
"time_unit": "ns"
},
{
"name": "BM_Bye",
"iterations": 5321,
"real_time": 11,
"cpu_time": 63,
"time_unit": "ns"
}
]
}

Some files were not shown because too many files have changed in this diff.