Merge bitcoin/bitcoin#24231: streams: Fix read-past-the-end and integer overflows

fa1b89a6bd scripted-diff: Rename nReadPos to m_read_pos in streams.h (MarcoFalke)
fa56c79df9 Make CDataStream work properly on 64-bit systems (MarcoFalke)
fab02f7991 streams: Fix read-past-the-end and integer overflows (MarcoFalke)

Pull request description:

  This is a follow-up to commit e26b62093a with the following fixes:

  * Fix unsigned integer overflow in `ignore()`, when `nReadPos` wraps.
  * Fix unsigned integer overflow in `read()`, when `nReadPos` wraps.
  * Fix read-past-the-end in `read()`, when `nReadPos` wraps.

  This shouldn't be remotely exploitable, because it requires a stream larger than 1GB. However, it might be exploitable if the attacker controls the datadir (I haven't checked).
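
  To make the failure mode concrete, here is a minimal standalone sketch (illustrative only, not Bitcoin Core code) of how an unchecked 32-bit read position wraps around, so a later `next > size` bounds check passes even though the read runs past the end:

  ```cpp
  #include <iostream>

  int main()
  {
      // Stand-in for the old bookkeeping: a 32-bit read position advanced
      // by an unchecked addition.
      unsigned int read_pos = 4294967290u; // close to the 32-bit maximum
      unsigned int request = 7;

      unsigned int next_read_pos = read_pos + request; // wraps modulo 2^32
      std::cout << next_read_pos << '\n'; // prints 1, so a naive
                                          // "next_read_pos > size" check passes
      return 0;
  }
  ```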

  A unit test for the overflow in `ignore()` looks like the following. It is left as an exercise to the reader to replace `foo.ignore(7)` with the appropriate call to `read()` to reproduce the overflow and read-error in `read()`.

  ```diff
  diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
  index 922fd8e513..ec6ea93919 100644
  --- a/src/test/coins_tests.cpp
  +++ b/src/test/coins_tests.cpp
  @@ -534,6 +534,20 @@ BOOST_AUTO_TEST_CASE(ccoins_serialization)
       } catch (const std::ios_base::failure&) {
       }

  +    CDataStream foo{0, 0};
  +    auto size{std::numeric_limits<uint32_t>::max()};
  +    foo.resize(size);
  +    BOOST_CHECK_EQUAL(foo.size(), size);
  +    foo.ignore(std::numeric_limits<int32_t>::max());
  +    size -= std::numeric_limits<int32_t>::max();
  +    BOOST_CHECK_EQUAL(foo.size(), size);
  +    foo.ignore(std::numeric_limits<int32_t>::max());
  +    size -= std::numeric_limits<int32_t>::max();
  +    BOOST_CHECK_EQUAL(foo.size(), size);
  +    BOOST_CHECK_EQUAL(foo.size(), 1);
  +    foo.ignore(7); // Should overflow, as the size is only 1
  +    BOOST_CHECK_EQUAL(foo.size(), uint32_t(1 - 7));
  +
       // Very large scriptPubKey (3*10^9 bytes) past the end of the stream
       CDataStream tmp(SER_DISK, CLIENT_VERSION);
       uint64_t x = 3000000000ULL;
  ```
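
  One way to trigger the same wrap through `read()` on the pre-fix code might look roughly like the following (a hedged sketch, not part of the PR; `out` is a hypothetical local buffer):

  ```cpp
  // With foo.size() == 1, asking for 7 bytes makes the old 32-bit read-position
  // arithmetic wrap, so the bounds check passes and memcpy reads past the end
  // of the buffer instead of throwing "end of data".
  std::vector<std::byte> out(7);
  foo.read(out); // pre-fix: read-past-the-end; post-fix: throws
  ```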

ACKs for top commit:
  klementtan:
    Code Review ACK fa1b89a6bd:

Tree-SHA512: 67f0a1baafe88eaf1dc844ac55b638d5cf168a18c945e3bf7a2cb03c9a5976674a8e3af2487d8a2c3eae21e5c0e7a519c8b16ee7f104934442e2769d100660e9
MarcoFalke 2022-02-21 08:09:02 +01:00
commit abaf943477
2 changed files with 50 additions and 52 deletions

--- a/src/streams.h
+++ b/src/streams.h
@@ -9,6 +9,7 @@
 #include <serialize.h>
 #include <span.h>
 #include <support/allocators/zeroafterfree.h>
+#include <util/overflow.h>
 
 #include <algorithm>
 #include <assert.h>
@@ -186,7 +187,7 @@ class CDataStream
 protected:
     using vector_type = SerializeData;
     vector_type vch;
-    unsigned int nReadPos{0};
+    vector_type::size_type m_read_pos{0};
 
     int nType;
     int nVersion;
@@ -229,37 +230,37 @@ public:
     //
     // Vector subset
     //
-    const_iterator begin() const { return vch.begin() + nReadPos; }
-    iterator begin() { return vch.begin() + nReadPos; }
+    const_iterator begin() const { return vch.begin() + m_read_pos; }
+    iterator begin() { return vch.begin() + m_read_pos; }
     const_iterator end() const { return vch.end(); }
     iterator end() { return vch.end(); }
-    size_type size() const { return vch.size() - nReadPos; }
-    bool empty() const { return vch.size() == nReadPos; }
-    void resize(size_type n, value_type c = value_type{}) { vch.resize(n + nReadPos, c); }
-    void reserve(size_type n) { vch.reserve(n + nReadPos); }
-    const_reference operator[](size_type pos) const { return vch[pos + nReadPos]; }
-    reference operator[](size_type pos) { return vch[pos + nReadPos]; }
-    void clear() { vch.clear(); nReadPos = 0; }
-    value_type* data() { return vch.data() + nReadPos; }
-    const value_type* data() const { return vch.data() + nReadPos; }
+    size_type size() const { return vch.size() - m_read_pos; }
+    bool empty() const { return vch.size() == m_read_pos; }
+    void resize(size_type n, value_type c = value_type{}) { vch.resize(n + m_read_pos, c); }
+    void reserve(size_type n) { vch.reserve(n + m_read_pos); }
+    const_reference operator[](size_type pos) const { return vch[pos + m_read_pos]; }
+    reference operator[](size_type pos) { return vch[pos + m_read_pos]; }
+    void clear() { vch.clear(); m_read_pos = 0; }
+    value_type* data() { return vch.data() + m_read_pos; }
+    const value_type* data() const { return vch.data() + m_read_pos; }
 
     inline void Compact()
     {
-        vch.erase(vch.begin(), vch.begin() + nReadPos);
-        nReadPos = 0;
+        vch.erase(vch.begin(), vch.begin() + m_read_pos);
+        m_read_pos = 0;
     }
 
     bool Rewind(std::optional<size_type> n = std::nullopt)
     {
         // Total rewind if no size is passed
         if (!n) {
-            nReadPos = 0;
+            m_read_pos = 0;
             return true;
         }
         // Rewind by n characters if the buffer hasn't been compacted yet
-        if (*n > nReadPos)
+        if (*n > m_read_pos)
             return false;
-        nReadPos -= *n;
+        m_read_pos -= *n;
         return true;
     }
@@ -281,36 +282,32 @@ public:
         if (dst.size() == 0) return;
 
         // Read from the beginning of the buffer
-        unsigned int nReadPosNext = nReadPos + dst.size();
-        if (nReadPosNext > vch.size()) {
+        auto next_read_pos{CheckedAdd(m_read_pos, dst.size())};
+        if (!next_read_pos.has_value() || next_read_pos.value() > vch.size()) {
             throw std::ios_base::failure("CDataStream::read(): end of data");
         }
-        memcpy(dst.data(), &vch[nReadPos], dst.size());
-        if (nReadPosNext == vch.size())
-        {
-            nReadPos = 0;
+        memcpy(dst.data(), &vch[m_read_pos], dst.size());
+        if (next_read_pos.value() == vch.size()) {
+            m_read_pos = 0;
             vch.clear();
             return;
         }
-        nReadPos = nReadPosNext;
+        m_read_pos = next_read_pos.value();
     }
 
-    void ignore(int nSize)
+    void ignore(size_t num_ignore)
     {
         // Ignore from the beginning of the buffer
-        if (nSize < 0) {
-            throw std::ios_base::failure("CDataStream::ignore(): nSize negative");
+        auto next_read_pos{CheckedAdd(m_read_pos, num_ignore)};
+        if (!next_read_pos.has_value() || next_read_pos.value() > vch.size()) {
+            throw std::ios_base::failure("CDataStream::ignore(): end of data");
         }
-        unsigned int nReadPosNext = nReadPos + nSize;
-        if (nReadPosNext >= vch.size())
-        {
-            if (nReadPosNext > vch.size())
-                throw std::ios_base::failure("CDataStream::ignore(): end of data");
-            nReadPos = 0;
+        if (next_read_pos.value() == vch.size()) {
+            m_read_pos = 0;
             vch.clear();
             return;
         }
-        nReadPos = nReadPosNext;
+        m_read_pos = next_read_pos.value();
     }
 
     void write(Span<const value_type> src)
@@ -594,7 +591,7 @@ private:
     FILE *src;            //!< source file
     uint64_t nSrcPos;     //!< how many bytes have been read from source
-    uint64_t nReadPos;    //!< how many bytes have been read from this
+    uint64_t m_read_pos;  //!< how many bytes have been read from this
     uint64_t nReadLimit;  //!< up to which position we're allowed to read
     uint64_t nRewind;     //!< how many bytes we guarantee to rewind
     std::vector<std::byte> vchBuf; //!< the buffer
@@ -604,7 +601,7 @@ protected:
     bool Fill() {
         unsigned int pos = nSrcPos % vchBuf.size();
         unsigned int readNow = vchBuf.size() - pos;
-        unsigned int nAvail = vchBuf.size() - (nSrcPos - nReadPos) - nRewind;
+        unsigned int nAvail = vchBuf.size() - (nSrcPos - m_read_pos) - nRewind;
         if (nAvail < readNow)
             readNow = nAvail;
         if (readNow == 0)
@@ -619,7 +616,7 @@ protected:
 public:
     CBufferedFile(FILE* fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn)
-        : nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), nReadPos(0), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, std::byte{0})
+        : nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), m_read_pos(0), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, std::byte{0})
     {
         if (nRewindIn >= nBufSize)
             throw std::ios_base::failure("Rewind limit must be less than buffer size");
@@ -648,33 +645,33 @@ public:
     //! check whether we're at the end of the source file
     bool eof() const {
-        return nReadPos == nSrcPos && feof(src);
+        return m_read_pos == nSrcPos && feof(src);
     }
 
     //! read a number of bytes
     void read(Span<std::byte> dst)
     {
-        if (dst.size() + nReadPos > nReadLimit) {
+        if (dst.size() + m_read_pos > nReadLimit) {
             throw std::ios_base::failure("Read attempted past buffer limit");
         }
         while (dst.size() > 0) {
-            if (nReadPos == nSrcPos)
+            if (m_read_pos == nSrcPos)
                 Fill();
-            unsigned int pos = nReadPos % vchBuf.size();
+            unsigned int pos = m_read_pos % vchBuf.size();
             size_t nNow = dst.size();
             if (nNow + pos > vchBuf.size())
                 nNow = vchBuf.size() - pos;
-            if (nNow + nReadPos > nSrcPos)
-                nNow = nSrcPos - nReadPos;
+            if (nNow + m_read_pos > nSrcPos)
+                nNow = nSrcPos - m_read_pos;
             memcpy(dst.data(), &vchBuf[pos], nNow);
-            nReadPos += nNow;
+            m_read_pos += nNow;
             dst = dst.subspan(nNow);
         }
     }
 
     //! return the current reading position
     uint64_t GetPos() const {
-        return nReadPos;
+        return m_read_pos;
     }
 
     //! rewind to a given reading position
@@ -682,22 +679,22 @@ public:
         size_t bufsize = vchBuf.size();
         if (nPos + bufsize < nSrcPos) {
             // rewinding too far, rewind as far as possible
-            nReadPos = nSrcPos - bufsize;
+            m_read_pos = nSrcPos - bufsize;
             return false;
         }
         if (nPos > nSrcPos) {
             // can't go this far forward, go as far as possible
-            nReadPos = nSrcPos;
+            m_read_pos = nSrcPos;
             return false;
         }
-        nReadPos = nPos;
+        m_read_pos = nPos;
         return true;
     }
 
     //! prevent reading beyond a certain position
     //! no argument removes the limit
     bool SetLimit(uint64_t nPos = std::numeric_limits<uint64_t>::max()) {
-        if (nPos < nReadPos)
+        if (nPos < m_read_pos)
             return false;
         nReadLimit = nPos;
         return true;
@@ -714,12 +711,12 @@ public:
     void FindByte(uint8_t ch)
     {
         while (true) {
-            if (nReadPos == nSrcPos)
+            if (m_read_pos == nSrcPos)
                 Fill();
-            if (vchBuf[nReadPos % vchBuf.size()] == std::byte{ch}) {
+            if (vchBuf[m_read_pos % vchBuf.size()] == std::byte{ch}) {
                 break;
             }
-            nReadPos++;
+            m_read_pos++;
         }
     }
 };

--- a/src/util/overflow.h
+++ b/src/util/overflow.h
@@ -6,6 +6,7 @@
 #define BITCOIN_UTIL_OVERFLOW_H
 
 #include <limits>
 #include <optional>
+#include <type_traits>
 
 template <class T>
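
The fix leans on the checked-addition helper declared in `util/overflow.h`, which reports overflow instead of wrapping. A minimal sketch of that pattern and of how the new bounds check consumes it (an approximation for illustration, not the exact Bitcoin Core implementation):

```cpp
#include <cstddef>
#include <iostream>
#include <limits>
#include <optional>
#include <type_traits>

// CheckedAdd-style helper (sketch): return std::nullopt instead of wrapping.
template <class T>
std::optional<T> CheckedAddSketch(const T i, const T j) noexcept
{
    static_assert(std::is_unsigned_v<T>, "sketch limited to unsigned types");
    if (i > std::numeric_limits<T>::max() - j) return std::nullopt; // would wrap
    return i + j;
}

int main()
{
    const std::size_t read_pos{std::numeric_limits<std::size_t>::max() - 3};
    // Instead of silently wrapping, the checked addition reports failure,
    // which read()/ignore() translate into an "end of data" exception.
    if (auto next{CheckedAddSketch(read_pos, std::size_t{7})}; !next) {
        std::cout << "overflow detected\n";
    }
    return 0;
}
```

Returning an `std::optional` forces every caller to handle the overflow case explicitly, which is why the new `read()` and `ignore()` check `has_value()` before comparing the next read position against `vch.size()`.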