Should I dump java.util.HashSet in favor of CompactHashSet?

gvl*_*sov 8 java collections performance big-o hashset

I found a Set implementation that uses hashing (with all the useful consequences, such as O(1) contains(), etc.) and claims to be more efficient than java.util.HashSet in every respect:

http://ontopia.wordpress.com/2009/09/23/a-faster-and-more-compact-set/

http://alias-i.com/lingpipe/docs/api/com/aliasi/util/CompactHashSet.html

So would it be a good idea to drop java.util.HashSet entirely in favor of com.aliasi.util.CompactHashSet wherever I need a java.util.Set?
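For illustration: both implementations present the standard java.util.Set interface, so on the API level the swap is a one-line change. A minimal sketch using the Ontopia variant (whose generic, no-argument constructor also appears in the benchmark below); the class name DropInExample is just for this example:

import java.util.Set;
import net.ontopia.utils.CompactHashSet;

public class DropInExample {
    public static void main(String[] args) {
        // Set<String> names = new java.util.HashSet<>();  // before
        Set<String> names = new CompactHashSet<String>();  // after: same Set API
        names.add("alice");
        System.out.println(names.contains("alice"));       // prints: true
    }
}

Whether the swap is actually a good idea is a separate question; the answer below measures it.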

lev*_*tov 9

Let's start with a little benchmark game.

The benchmark is based on the one from the original article, but uses modern tools.
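The tools behind the imports below are JMH (the timing harness) and JOL (java-object-layout, for measuring memory footprint), plus the competing set implementations from Ontopia, HPPC, and Koloboke. The com.google.common.collect.GuavaCompactHashSet import is not a published Guava artifact; it appears to be a renamed copy of Guava's then-internal CompactHashSet kept in the benchmark's own sources. Approximate Maven coordinates for the published libraries (versions omitted; the Koloboke artifact names are my best recollection for the JDK 8 builds of that era):

org.openjdk.jmh:jmh-core (plus jmh-generator-annprocess for the annotation processor)
org.openjdk.jol:jol-core
com.carrotsearch:hppc
net.openhft:koloboke-api-jdk8 and net.openhft:koloboke-impl-jdk8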

package tests;

import com.carrotsearch.hppc.ObjectOpenHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.collect.GuavaCompactHashSet;
import net.ontopia.utils.CompactHashSet;
import net.openhft.koloboke.collect.set.hash.HashObjSet;
import net.openhft.koloboke.collect.set.hash.HashObjSets;
import org.openjdk.jmh.annotations.*;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

import static java.util.Arrays.stream;
import static org.openjdk.jol.info.GraphLayout.parseInstance;

@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@OperationsPerInvocation(TestHashSet.TIMES)
@Threads(1)
@Fork(1)
@State(Scope.Thread)
public class TestHashSet {
    public static final int TIMES = 1000000; // operations per benchmark invocation
    private static final int MAX = 5000000;  // keys are drawn uniformly from [0, MAX]

    // Retained size of the distinct boxed Longs, subtracted in test() so that the
    // footprint numbers reflect only the sets' own per-element overhead.
    private static long ELEMENTS_SIZE;

    // Pre-generated keys: with 1M random keys over a 5M range, most lookups
    // and removes miss the set.
    static Long[] add = new Long[TIMES], lookup = new Long[TIMES], remove = new Long[TIMES];
    static {
        for (int ix = 0; ix < TIMES; ix++)
            add[ix] = new Long(Math.round(Math.random() * MAX));
        ELEMENTS_SIZE = stream(add).distinct().count() * parseInstance(add[0]).totalSize();
        for (int ix = 0; ix < TIMES; ix++)
            lookup[ix] = new Long(Math.round(Math.random() * MAX));
        for (int ix = 0; ix < TIMES; ix++)
            remove[ix] = new Long(Math.round(Math.random() * MAX));
    }

    @Benchmark
    public int hashSet() {
        Set<Long> set = new HashSet<Long>();
        for (Long o : add) {
            set.add(o);
        }
        // fold every result into r so JMH cannot dead-code-eliminate the calls
        int r = 0;
        for (Long o : lookup) {
            r ^= set.contains(o) ? 1 : 0;
        }
        for (Long o : set) {
            r += o.intValue();
        }
        for (Long o : remove) {
            set.remove(o);
        }
        return r + set.size();
    }

    @Benchmark
    public int compactHashSet() {
        Set<Long> set = new CompactHashSet<Long>();
        for (Long o : add) {
            set.add(o);
        }
        int r = 0;
        for (Long o : lookup) {
            r ^= set.contains(o) ? 1 : 0;
        }
        for (Long o : set) {
            r += o.intValue();
        }
        for (Long o : remove) {
            set.remove(o);
        }
        return r + set.size();
    }

    @Benchmark
    public int hppcSet() {
        ObjectOpenHashSet<Long> set = new ObjectOpenHashSet<Long>();
        for (Long o : add) {
            set.add(o);
        }
        int r = 0;
        for (Long o : lookup) {
            r ^= set.contains(o) ? 1 : 0;
        }
        for (ObjectCursor<Long> cur : set) {
            r += cur.value.intValue();
        }
        for (Long o : remove) {
            set.remove(o);
        }
        return r + set.size();
    }

    @Benchmark
    public int kolobokeSet() {
        Set<Long> set = HashObjSets.newMutableSet();
        for (Long o : add) {
            set.add(o);
        }
        int r = 0;
        for (Long o : lookup) {
            r ^= set.contains(o) ? 1 : 0;
        }
        for (Long o : set) {
            r += o.intValue();
        }
        for (Long o : remove) {
            set.remove(o);
        }
        return r + set.size();
    }

    @Benchmark
    public int guavaCompactHashSet() {
        // fair comparison -- growing table
        Set<Long> set = new GuavaCompactHashSet<>(10);
        for (Long o : add) {
            set.add(o);
        }
        int r = 0;
        for (Long o : lookup) {
            r ^= set.contains(o) ? 1 : 0;
        }
        for (Long o : set) {
            r += o.intValue();
        }
        for (Long o : remove) {
            set.remove(o);
        }
        return r + set.size();
    }

    public static void main(String[] argv) {
        // Memory-footprint test: fill each set with the same keys and measure
        // its retained size with JOL.
        HashSet<Long> hashSet = new HashSet<>();
        test("HashSet", hashSet, hashSet::add);
        CompactHashSet<Long> compactHashSet = new CompactHashSet<>();
        test("CompactHashSet", compactHashSet, compactHashSet::add);
        HashObjSet<Long> kolobokeSet = HashObjSets.newMutableSet();
        test("KolobokeSet", kolobokeSet, kolobokeSet::add);
        ObjectOpenHashSet<Long> hppcSet = new ObjectOpenHashSet<>();
        test("HPPC set", hppcSet, hppcSet::add);
        GuavaCompactHashSet<Long> guavaCompactHashSet = new GuavaCompactHashSet<>(10);
        test("GuavaCompactHashSet", guavaCompactHashSet, guavaCompactHashSet::add);
    }

    public static void test(String name, Object set, Consumer<Long> setAdd) {
        for (Long o : add) {
            setAdd.accept(o);
        }
        // Subtract the size of the boxed Long elements themselves, leaving only the
        // set's own overhead; note the division by TIMES, not by the distinct count.
        System.out.printf("%s: %.1f bytes per element\n", name,
                ((parseInstance(set).totalSize() - ELEMENTS_SIZE) * 1.0 / TIMES));
    }
}
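To reproduce the timing numbers, the JMH benchmarks have to be launched through a runner (or from an uber-jar, the standard JMH setup, via java -jar benchmarks.jar TestHashSet). A minimal hypothetical launcher, assuming the class above is on the classpath:

package tests;

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

// Runs all @Benchmark methods in TestHashSet programmatically.
public class TestHashSetRunner {
    public static void main(String[] args) throws RunnerException {
        Options opts = new OptionsBuilder()
                .include(TestHashSet.class.getSimpleName())
                .build();
        new Runner(opts).run();
    }
}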

Results:

Set implementation    Speed       Memory footprint (bytes/elem)
                      (ns/op)     +UseCompressedOops  -UseCompressedOops
CompactHashSet         828         8.4                16.8
HashSet                676        37.4                60.3
HPPC Set               853        10.5                18.9
Koloboke Set           587         8.4                16.8
GuavaCompactHashSet    874        25.9                37.4

It appears that CompactHashSet is actually slower than the good old HashSet, even though it uses far less memory.

  • 1. There are trade-offs: you could make a HashMap denser, trading speed for size. 2. Your data is perfectly random, which may favor some implementations. 3. Your lookups almost never find anything, which may bias the results too. 4. The same goes for the removes. 5. Really, many different benchmarks would be needed for a complete answer (but what would we do with that many numbers). (2 upvotes)
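To address points 3 and 4 of the comment above, a variant workload could draw most lookup and remove keys from the keys that were actually added, so the operations mostly hit. A minimal hypothetical sketch (HitHeavyWorkload, the hitRate parameter, and the guaranteed-miss trick are all my additions, not part of the original benchmark):

package tests;

import java.util.concurrent.ThreadLocalRandom;

// Generates add/lookup/remove key arrays where lookups and removes hit an
// existing key with probability hitRate, and otherwise miss for certain
// (keys above max are never added).
public class HitHeavyWorkload {
    public static Long[][] generate(int times, int max, double hitRate) {
        ThreadLocalRandom rnd = ThreadLocalRandom.current();
        Long[] add = new Long[times], lookup = new Long[times], remove = new Long[times];
        for (int ix = 0; ix < times; ix++)
            add[ix] = (long) rnd.nextInt(max);
        for (int ix = 0; ix < times; ix++) {
            lookup[ix] = rnd.nextDouble() < hitRate
                    ? add[rnd.nextInt(times)]          // reuse an added key: a hit
                    : (long) (max + rnd.nextInt(max)); // out-of-range key: a miss
            remove[ix] = rnd.nextDouble() < hitRate
                    ? add[rnd.nextInt(times)]
                    : (long) (max + rnd.nextInt(max));
        }
        return new Long[][] { add, lookup, remove };
    }
}

Substituting these arrays for the static ones in the benchmark would turn the mostly-miss workload into a mostly-hit one; hit-heavy and miss-heavy workloads stress different code paths, so the ranking of the implementations may well change.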