Skip to content

Commit

Permalink
silent OT optimizations
Browse files Browse the repository at this point in the history
  • Loading branch information
ladnir committed Apr 5, 2024
1 parent 67af6a4 commit 987f05b
Show file tree
Hide file tree
Showing 10 changed files with 315 additions and 239 deletions.
2 changes: 1 addition & 1 deletion cryptoTools
7 changes: 6 additions & 1 deletion libOTe/Tools/CoeffCtx.h
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,12 @@ namespace osuCrypto {
OC_FORCEINLINE void fromBlock(F& ret, const block& b) {
static_assert(std::is_trivially_copyable<F>::value, "memcpy is used so must be trivially_copyable.");

if constexpr (sizeof(F) <= sizeof(block))
if constexpr (std::is_same<F,block>::value)
{
// if F is a block, just return the block.
ret = b;
}
else if constexpr (sizeof(F) <= sizeof(block))
{
// if small, just return the bytes of b
memcpy(&ret, &b, sizeof(F));
Expand Down
120 changes: 62 additions & 58 deletions libOTe/Tools/Pprf/PprfUtil.h
Original file line number Diff line number Diff line change
Expand Up @@ -138,13 +138,13 @@ namespace osuCrypto

// num of bytes they will take up.
u64 numBytes =
depth * numTrees * sizeof(std::array<block,2>) + // each internal level of the tree has two sums
depth * numTrees * sizeof(std::array<block, 2>) + // each internal level of the tree has two sums
elementSize * numTrees * 2 + // we must program numTrees inactive F leaves
elementSize * numTrees * 2 * programPuncturedPoint; // if we are programming the active leaf, then we have numTrees more.

// allocate the buffer and partition them.
buff.resize(numBytes);
sums = span<std::array<block, 2>>((std::array<block,2>*)buff.data(), depth * numTrees);
sums = span<std::array<block, 2>>((std::array<block, 2>*)buff.data(), depth * numTrees);
leaf = span<u8>((u8*)(sums.data() + sums.size()),
elementSize * numTrees * 2 +
elementSize * numTrees * 2 * programPuncturedPoint
Expand Down Expand Up @@ -188,76 +188,80 @@ namespace osuCrypto
}


struct TreeAllocator
inline void allocateExpandTree(
u64 domainSize,
AlignedUnVector<block>& alloc,
std::vector<span<AlignedArray<block, 8>>>& levels,
bool reuseLevel = true)
{
TreeAllocator() = default;
TreeAllocator(const TreeAllocator&) = delete;
TreeAllocator(TreeAllocator&&) = default;

using ValueType = AlignedArray<block, 8>;
std::list<AlignedUnVector<ValueType>> mTrees;
std::vector<span<ValueType>> mFreeTrees;
//std::mutex mMutex;
u64 mTreeSize = 0, mNumTrees = 0;
auto depth = log2ceil(domainSize);
levels.resize(depth + 1);

void reserve(u64 num, u64 size)
if (reuseLevel)
{
//std::lock_guard<std::mutex> lock(mMutex);
mTreeSize = size;
mNumTrees += num;
mTrees.clear();
mFreeTrees.clear();
mTrees.emplace_back(num * size);
auto iter = mTrees.back().data();
for (u64 i = 0; i < num; ++i)
auto secondLast = roundUpTo((domainSize + 1) / 2,2);
auto size = roundUpTo((domainSize + secondLast),2);

// we will allocate the last two levels of the tree.
// these levels will be used for the smaller levels as
// well. We will alternate between the two.
alloc.clear();
alloc.resize(size * 8);

std::array<span<AlignedArray<block, 8>>, 2> buffs;
buffs[0] = { (AlignedArray<block, 8>*)alloc.data(), secondLast };
buffs[1] = { (AlignedArray<block, 8>*)alloc.data() + secondLast , domainSize };

// give the last level the big buffer.
levels.back() = buffs[1].subspan(0, domainSize);
for (u64 i = levels.size() - 2, j = 0ull; i < levels.size(); --i, ++j)
{
mFreeTrees.push_back(span<ValueType>(iter, size));
assert((u64)mFreeTrees.back().data() % 32 == 0);
iter += size;
auto width = divCeil(domainSize, 1ull << (depth - i));
assert(
levels[i + 1].size() == 2 * width ||
levels[i + 1].size() == 2 * width - 1);

if (width > 1)
width = roundUpTo(width, 2);

// each level will be half the size of the next level.
// we alternate which buffer we use.
levels[i] = buffs[j % 2].subspan(0, width);
}
}

span<ValueType> get()
else
{
//std::lock_guard<std::mutex> lock(mMutex);
if (mFreeTrees.size() == 0)
u64 totalSize = 0;
for (u64 i = 0; i < levels.size(); ++i)
{
assert(mTreeSize);
mTrees.emplace_back(mTreeSize);
mFreeTrees.push_back(span<ValueType>(mTrees.back().data(), mTreeSize));
assert((u64)mFreeTrees.back().data() % 32 == 0);
++mNumTrees;
auto width = divCeil(domainSize, 1ull << (depth - i));
totalSize += roundUpTo(width, 2);
}

auto ret = mFreeTrees.back();
mFreeTrees.pop_back();
return ret;
}
alloc.clear();
alloc.resize(totalSize * 8);
span<AlignedArray<block, 8>> buff((AlignedArray<block, 8>*)alloc.data(), totalSize);

void clear()
{
mTrees = {};
mFreeTrees = {};
mTreeSize = 0;
mNumTrees = 0;
}
};
levels.back() = buff.subspan(0, domainSize);
buff = buff.subspan(domainSize);
for (u64 i = levels.size() - 2, j = 0ull; i < levels.size(); --i, ++j)
{
// each level will be half the size of the next level.
auto width = divCeil(domainSize, 1ull << (depth - i));
assert(
levels[i + 1].size() == 2 * width ||
levels[i + 1].size() == 2 * width - 1);

if(width > 1)
width = roundUpTo(width, 2);

inline void allocateExpandTree(
TreeAllocator& alloc,
std::vector<span<AlignedArray<block, 8>>>& levels)
{
span<AlignedArray<block, 8>> tree = alloc.get();
assert((u64)tree.data() % 32 == 0);
levels[0] = tree.subspan(0, 1);
auto rem = tree.subspan(2);
for (auto i = 1ull; i < levels.size(); ++i)
{
levels[i] = rem.subspan(0, levels[i - 1].size() * 2);
assert((u64)levels[i].data() % 32 == 0);
rem = rem.subspan(levels[i].size());
levels[i] = buff.subspan(0, width);
buff = buff.subspan(levels[i].size());
}
}

if (levels[0].size() != 1)
throw RTE_LOC;
}


Expand Down
Loading

0 comments on commit 987f05b

Please sign in to comment.