51Degrees IP Intelligence Java 4.4

IP Intelligence services for 51Degrees Pipeline

PerformanceBenchmark.java

The example illustrates a "clock-time" benchmark for assessing detection speed.

It uses a YAML-formatted evidence file, "evidence.yml", which is supplied with the distribution or can be obtained from the data repository on GitHub.

It's important to understand the trade-offs between performance, memory usage and accuracy that the 51Degrees pipeline configuration makes available. This example runs a range of different configurations to illustrate the difference in performance.

Requesting properties from a single component reduces detection time compared with requesting properties from multiple components. If you don't specify any properties to detect, then all properties are detected.
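
As a minimal sketch of how that choice is made (the data file path and class name below are placeholders; the builder calls are the same ones used in the full example further down), the performance profile and the property selection are both applied on the pipeline builder before `build()` is called:

```java
import fiftyone.ipintelligence.IPIntelligenceOnPremisePipelineBuilder;
import fiftyone.ipintelligence.IPIntelligencePipelineBuilder;
import fiftyone.pipeline.core.flowelements.Pipeline;

import static fiftyone.pipeline.engines.Constants.PerformanceProfiles.MaxPerformance;

public class BuilderConfigurationSketch {
    public static void main(String[] args) throws Exception {
        // Load the engine from a local data file ("data-file.ipi" is a placeholder path).
        IPIntelligenceOnPremisePipelineBuilder builder =
                new IPIntelligencePipelineBuilder().useOnPremise("data-file.ipi", false);
        // MaxPerformance trades higher memory usage for faster detections.
        builder.setPerformanceProfile(MaxPerformance)
                .setAutoUpdate(false)
                .setShareUsage(false);
        // Requesting only the properties you need reduces detection time;
        // omit any setProperty calls to detect all properties.
        builder.setProperty("RegisteredName");
        Pipeline pipeline = builder.build();
        try {
            // Create FlowData, add evidence and process, as the full example below does.
        } finally {
            pipeline.close();
        }
    }
}
```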

Please review [performance options](https://51degrees.com/documentation/_device_detection__features__performance_options.html) and hash dataset options for more information about adjusting performance.

This example is available in full on GitHub.

This example requires a local data file. The free 'Lite' data file can be acquired by pulling the git submodules under this repository (run `git submodule update --recursive`) or from the device-detection-data GitHub repository.

The Lite data file is only used for illustration, and has limited accuracy and capabilities. Find out about the more capable data files that are available on our pricing page.

/* *********************************************************************
* This Original Work is copyright of 51 Degrees Mobile Experts Limited.
* Copyright 2025 51 Degrees Mobile Experts Limited, Davidson House,
* Forbury Square, Reading, Berkshire, United Kingdom RG1 3EU.
*
* This Original Work is licensed under the European Union Public Licence
* (EUPL) v.1.2 and is subject to its terms as set out below.
*
* If a copy of the EUPL was not distributed with this file, You can obtain
* one at https://opensource.org/licenses/EUPL-1.2.
*
* The 'Compatible Licences' set out in the Appendix to the EUPL (as may be
* amended by the European Commission) shall be deemed incompatible for
* the purposes of the Work and the provisions of the compatibility
* clause in Article 5 of the EUPL shall not apply.
*
* If using the Work as, or as part of, a network application, by
* including the attribution notice(s) required under Article 5 of the EUPL
* in the end user terms of the application under an appropriate heading,
* such notice(s) shall fulfill the requirements of that article.
* ********************************************************************* */
package fiftyone.ipintelligence.examples.console;

import fiftyone.common.testhelpers.LogbackHelper;
import fiftyone.ipintelligence.IPIntelligenceOnPremisePipelineBuilder;
import fiftyone.ipintelligence.IPIntelligencePipelineBuilder;
import fiftyone.ipintelligence.examples.shared.DataFileHelper;
import fiftyone.ipintelligence.examples.shared.EvidenceHelper;
import fiftyone.ipintelligence.engine.onpremise.flowelements.IPIntelligenceOnPremiseEngine;
import fiftyone.ipintelligence.shared.IPIntelligenceData;
import fiftyone.pipeline.core.data.FlowData;
import fiftyone.pipeline.core.data.IWeightedValue;
import fiftyone.pipeline.core.flowelements.Pipeline;
import fiftyone.pipeline.engines.Constants;
import fiftyone.pipeline.util.FileFinder;
import org.apache.commons.lang3.BooleanUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MarkerFactory;
import java.io.File;
import java.io.PrintWriter;
import java.nio.file.Files;
import java.util.*;
import java.util.concurrent.*;
import static fiftyone.ipintelligence.examples.shared.DataFileHelper.getDataFileLocation;
import static fiftyone.ipintelligence.examples.shared.DataFileHelper.getEvidenceFile;
import static fiftyone.pipeline.engines.Constants.PerformanceProfiles.*;

public class PerformanceBenchmark {
    // the default number of threads if one is not provided.
    public static final int DEFAULT_NUMBER_OF_THREADS = 4;
    // the number of tests to execute.
    public static final int TESTS_PER_THREAD = 10000;

    public static final Logger logger = LoggerFactory.getLogger(PerformanceBenchmark.class);

    // where the results of the tests are gathered
    private List<Future<BenchmarkResult>> resultList;
    private int numberOfThreads = DEFAULT_NUMBER_OF_THREADS;
    private List<Map<String, String>> evidence;
    private String dataFileLocation;
    private PrintWriter writer;

    // a default set of configurations: (profile, allProperties, performanceGraph, predictiveGraph)
    public static PerformanceConfiguration[] DEFAULT_PERFORMANCE_CONFIGURATIONS = {
            new PerformanceConfiguration(MaxPerformance, false, false, true),
            new PerformanceConfiguration(MaxPerformance, false, true, false),
            new PerformanceConfiguration(MaxPerformance, true, true, false)
    };

    public static void main(String[] args) throws Exception {
        LogbackHelper.configureLogback(FileFinder.getFilePath("logback.xml"));
        String dataFilename = args.length > 0 ? args[0] : null;
        String evidenceFilename = args.length > 1 ? args[1] : null;
        int numberOfThreads = DEFAULT_NUMBER_OF_THREADS;
        if (args.length > 2) {
            numberOfThreads = Integer.parseInt(args[2]);
        }
        new PerformanceBenchmark().runBenchmarks(DEFAULT_PERFORMANCE_CONFIGURATIONS,
                dataFilename,
                evidenceFilename,
                numberOfThreads,
                new PrintWriter(System.out, true));
    }
    protected void runBenchmarks(PerformanceConfiguration[] performanceConfigurations,
                                 String dataFilename,
                                 String evidenceFilename,
                                 int numberOfThreads,
                                 PrintWriter writer) throws Exception {
        logger.info("Running Performance example");
        this.dataFileLocation = getDataFileLocation(dataFilename);
        File evidenceFile = getEvidenceFile(evidenceFilename);
        this.evidence = Collections.unmodifiableList(
                EvidenceHelper.getEvidenceList(evidenceFile, 20000));
        this.numberOfThreads = numberOfThreads;
        this.writer = writer;
        // run "from memory" benchmarks - the only profiles that really make sense
        // are MaxPerformance
        for (PerformanceConfiguration config : performanceConfigurations) {
            if (!config.profile.equals(MaxPerformance)) {
                // TODO: Remove this check
                continue;
            }
            executeBenchmark(config);
        }
        logger.info("Finished Performance example");
    }
    private void executeBenchmark(PerformanceConfiguration config) throws Exception {
        logger.info(MarkerFactory.getMarker(config.profile.name() + " " +
                        config.allProperties + " " +
                        config.performanceGraph + " " +
                        config.predictiveGraph),
                "Benchmarking with profile: {} AllProperties: {}, " +
                        "performanceGraph: {}, predictiveGraph {}",
                config.profile,
                config.allProperties,
                config.performanceGraph,
                config.predictiveGraph);
        Pipeline pipeline = null;
        try {
            logger.info("Load from disk");
            IPIntelligenceOnPremisePipelineBuilder builder = new IPIntelligencePipelineBuilder()
                    // load from disk
                    .useOnPremise(dataFileLocation, false);
            setPipelinePerformanceProperties(builder, config);
            pipeline = builder.build();
            DataFileHelper.logDataFileInfo(pipeline.getElement(IPIntelligenceOnPremiseEngine.class));

            // run the benchmarks twice, once to warm up the JVM
            logger.info("Warming up");
            runTests(pipeline);
            System.gc();
            Thread.sleep(300);

            logger.info("Running");
            long executionTime = runTests(pipeline);
            logger.info("Finished - Execution time was {} ms", executionTime);
        } finally {
            if (Objects.nonNull(pipeline)) {
                pipeline.close();
            }
        }
        doReport();
    }
    private void setPipelinePerformanceProperties(
            IPIntelligenceOnPremisePipelineBuilder builder,
            PerformanceConfiguration config) {
        // the different profiles provide for trading off memory usage
        builder.setPerformanceProfile(config.profile)
                // set this to false for testing
                .setAutoUpdate(false)
                // set this to false for testing
                .setShareUsage(false)
                // hint for cache concurrency
                .setConcurrency(numberOfThreads);
        // Performance is improved by selecting only the properties you intend to use.
        // Requesting properties from a single component reduces detection time
        // compared with requesting properties from multiple components.
        // If you don't specify any properties to detect, then all properties are detected.
        // Here we choose either "all properties", by specifying none, or just "RegisteredName".
        if (BooleanUtils.isFalse(config.allProperties)) {
            builder.setProperty("RegisteredName");
        }
    }
    private void doReport() throws Exception {
        long totalMillis = 0;
        long totalChecks = 0;
        int checksum = 0;
        for (Future<BenchmarkResult> result : resultList) {
            BenchmarkResult bmr = result.get();
            writer.format("Thread: %,d detections, elapsed %f seconds, %,d Detections per second%n",
                    bmr.count,
                    bmr.elapsedMillis / 1000.0,
                    (Math.round(1000.0 * bmr.count / bmr.elapsedMillis)));
            totalMillis += bmr.elapsedMillis;
            totalChecks += bmr.count;
            checksum += bmr.checkSum;
        }
        // output the results from the benchmark to the console.
        // totalMillis sums the elapsed time of every thread, so dividing by
        // (numberOfThreads * totalChecks) approximates the wall-clock time per
        // detection, and 1000 / millisPerTest the overall detections per second.
        double millisPerTest = ((double) totalMillis / (numberOfThreads * totalChecks));
        writer.format("Overall: %,d detections, Average millisecs per detection: %f, Detections per second: %,d\n",
                totalChecks, millisPerTest, Math.round(1000.0 / millisPerTest));
        writer.format("Overall: Concurrent threads: %d, Checksum: %x \n", numberOfThreads, checksum);
        writer.println();
    }
    private long runTests(Pipeline pipeline) throws Exception {
        // create a list of callables, one per thread
        List<Callable<BenchmarkResult>> callables = new ArrayList<>();
        for (int i = 0; i < numberOfThreads; i++) {
            callables.add(new BenchmarkRunnable(pipeline, evidence));
        }
        // run the callables concurrently in a fixed-size pool
        ExecutorService service = Executors.newFixedThreadPool(numberOfThreads);
        long start = System.currentTimeMillis();
        // invokeAll submits every task and blocks until they have all completed
        resultList = service.invokeAll(callables);
        // retrieve the results, propagating any exception thrown by a task
        for (Future<BenchmarkResult> result : resultList) {
            result.get();
        }
        long duration = System.currentTimeMillis() - start;
        service.shutdown();
        return duration;
    }
    private static class BenchmarkRunnable implements Callable<BenchmarkResult> {
        // the result gathered by this benchmark thread
        private final BenchmarkResult result;
        private final List<Map<String, String>> testList;
        private final Pipeline pipeline;

        BenchmarkRunnable(Pipeline pipeline, List<Map<String, String>> evidence) {
            this.testList = evidence;
            // initialise the benchmark variables
            this.pipeline = pipeline;
            this.result = new BenchmarkResult();
            result.elapsedMillis = 0;
            result.count = 0;
            result.checkSum = 0;
        }

        @Override
        public BenchmarkResult call() {
            result.checkSum = 0;
            long start = System.currentTimeMillis();
            for (Map<String, String> evidence : testList) {
                // the benchmark is for detection time only.
                // A try-with-resources block MUST be used for the
                // FlowData instance. This ensures that native resources
                // created by the IP Intelligence engine are freed.
                try (FlowData flowData = pipeline.createFlowData()) {
                    flowData
                            .addEvidence(evidence)
                            .process();
                    // Calculate a checksum to compare different runs on
                    // the same data.
                    IPIntelligenceData ipData = flowData.get(IPIntelligenceData.class);
                    if (ipData != null) {
                        if (ipData.getRegisteredName().hasValue()) {
                            List<IWeightedValue<String>> value = ipData.getRegisteredName().getValue();
                            if (value != null) {
                                for (IWeightedValue<?> weightedValue : value) {
                                    result.checkSum += weightedValue.getValue().hashCode();
                                }
                            }
                        }
                    }
                    result.count++;
                    if (result.count >= TESTS_PER_THREAD) {
                        break;
                    }
                } catch (Exception e) {
                    logger.error("Exception getting flow data", e);
                }
            }
            result.elapsedMillis += System.currentTimeMillis() - start;
            return result;
        }
    }
    static class BenchmarkResult {
        // number of evidence records processed to determine the result.
        private long count;
        // processing time in millis for this thread
        private long elapsedMillis;
        // used to ensure the compiler optimiser doesn't optimise out the very
        // method that the benchmark is testing.
        private int checkSum;
    }

    public static class PerformanceConfiguration {
        Constants.PerformanceProfiles profile;
        boolean allProperties;
        boolean performanceGraph;
        boolean predictiveGraph;

        public PerformanceConfiguration(Constants.PerformanceProfiles profile,
                                        boolean allProperties, boolean performanceGraph,
                                        boolean predictiveGraph) {
            this.profile = profile;
            this.allProperties = allProperties;
            this.performanceGraph = performanceGraph;
            this.predictiveGraph = predictiveGraph;
        }
    }
}
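
As a usage sketch, the benchmark can also be driven programmatically with a custom set of configurations rather than the defaults used by `main`. The subclass name, file paths and thread count below are placeholders; everything else reuses the public and protected members declared above:

```java
import fiftyone.ipintelligence.examples.console.PerformanceBenchmark;
import fiftyone.ipintelligence.examples.console.PerformanceBenchmark.PerformanceConfiguration;

import java.io.PrintWriter;

import static fiftyone.pipeline.engines.Constants.PerformanceProfiles.MaxPerformance;

public class RunCustomBenchmark extends PerformanceBenchmark {
    public static void main(String[] args) throws Exception {
        // (profile, allProperties, performanceGraph, predictiveGraph) - only
        // MaxPerformance configurations are executed by runBenchmarks.
        PerformanceConfiguration[] configs = {
                new PerformanceConfiguration(MaxPerformance, false, true, false),
                new PerformanceConfiguration(MaxPerformance, true, true, false)
        };
        new RunCustomBenchmark().runBenchmarks(
                configs,
                "path/to/data-file.ipi",  // placeholder; pass null to locate the default, as main does
                "path/to/evidence.yml",   // placeholder; pass null to locate the default, as main does
                8,                        // number of worker threads
                new PrintWriter(System.out, true));
    }
}
```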