diff --git a/cores/esp8266/umm_malloc/Notes.h b/cores/esp8266/umm_malloc/Notes.h
index e9d96b1e83..33b2fc545a 100644
--- a/cores/esp8266/umm_malloc/Notes.h
+++ b/cores/esp8266/umm_malloc/Notes.h
@@ -248,4 +248,41 @@ Enhancement ideas:
   save on the execution time spent with interrupts disabled.
 
 */
+
+/*
+  Dec 29, 2021
+  Upstream umm_malloc, at git hash id 4dac43c3be7a7470dd669323021ba238081da18e,
+  processed all project files with the style program uncrustify.
+
+  This PR runs "uncrustify" over our ported version of umm_malloc as well.
+  This should make subsequent merges of upstream into this port easier.
+
+  This also makes the style more consistent throughout umm_malloc.
+
+  Some edits to the source files were needed to get uncrustify to work:
+  1) Macros with "if"s need to be of the form "if ( blah ) { }"; curly braces
+     are needed for it to parse correctly (see the example below).
+  2) These "#ifdef __cplusplus" guards also had to be commented out while
+     running uncrustify to avoid parser confusion:
+  ```
+  #ifdef __cplusplus
+  extern "C" {
+  #endif
+  ```
+  and
+  ```
+  #ifdef __cplusplus
+  }
+  #endif
+  ```
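+
+  For example, item 1 means an if-style macro such as DBGLOG_FORCE must wrap
+  its body in braces; the brace-less form below is only illustrative:
+  ```
+  /* uncrustify cannot parse a brace-less if body (hypothetical form): */
+  #define DBGLOG_FORCE(force, format, ...) if (force) DBGLOG_FUNCTION(format,##__VA_ARGS__)
+  /* the braced form, as used in dbglog.h, parses correctly: */
+  #define DBGLOG_FORCE(force, format, ...) {if (force) {DBGLOG_FUNCTION(format,##__VA_ARGS__);}}
+  ```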
+*/
 #endif
diff --git a/cores/esp8266/umm_malloc/dbglog/README.txt b/cores/esp8266/umm_malloc/dbglog/README.txt
index 54f86e148c..90a81d382d 100644
--- a/cores/esp8266/umm_malloc/dbglog/README.txt
+++ b/cores/esp8266/umm_malloc/dbglog/README.txt
@@ -1 +1,2 @@
 Downloaded from: https://github.com/rhempel/c-helper-macros/tree/develop
+Applied uncrustify to be consistent with the rest of the umm_malloc files.
diff --git a/cores/esp8266/umm_malloc/dbglog/dbglog.h b/cores/esp8266/umm_malloc/dbglog/dbglog.h
index 2554d23ff6..21f10cfc9e 100644
--- a/cores/esp8266/umm_malloc/dbglog/dbglog.h
+++ b/cores/esp8266/umm_malloc/dbglog/dbglog.h
@@ -50,11 +50,11 @@
 #undef DBGLOG_FORCE
 
 #ifndef DBGLOG_LEVEL
-#  define DBGLOG_LEVEL 0
+#define DBGLOG_LEVEL 0
 #endif
 
 #ifndef DBGLOG_FUNCTION
-#  define DBGLOG_FUNCTION printf
+#define DBGLOG_FUNCTION printf
 #endif
 
 #define DBGLOG_32_BIT_PTR(x) ((uint32_t)(((uintptr_t)(x)) & 0xffffffff))
@@ -62,39 +62,39 @@
 /* ------------------------------------------------------------------------- */
 
 #if DBGLOG_LEVEL >= 6
-#  define DBGLOG_TRACE(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
+#define DBGLOG_TRACE(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
 #else
-#  define DBGLOG_TRACE(format, ...)
+#define DBGLOG_TRACE(format, ...)
 #endif
 
 #if DBGLOG_LEVEL >= 5
-#  define DBGLOG_DEBUG(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
+#define DBGLOG_DEBUG(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
 #else
-#  define DBGLOG_DEBUG(format, ...)
+#define DBGLOG_DEBUG(format, ...)
 #endif
 
 #if DBGLOG_LEVEL >= 4
-#  define DBGLOG_CRITICAL(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
+#define DBGLOG_CRITICAL(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
 #else
-#  define DBGLOG_CRITICAL(format, ...)
+#define DBGLOG_CRITICAL(format, ...)
 #endif
 
 #if DBGLOG_LEVEL >= 3
-#  define DBGLOG_ERROR(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
+#define DBGLOG_ERROR(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
 #else
-#  define DBGLOG_ERROR(format, ...)
+#define DBGLOG_ERROR(format, ...)
 #endif
 
 #if DBGLOG_LEVEL >= 2
-#  define DBGLOG_WARNING(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
+#define DBGLOG_WARNING(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
 #else
-#  define DBGLOG_WARNING(format, ...)
+#define DBGLOG_WARNING(format, ...)
 #endif
 
 #if DBGLOG_LEVEL >= 1
-#  define DBGLOG_INFO(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
+#define DBGLOG_INFO(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
 #else
-#  define DBGLOG_INFO(format, ...)
+#define DBGLOG_INFO(format, ...)
 #endif
 
-#define DBGLOG_FORCE(force, format, ...) {if(force) {DBGLOG_FUNCTION(format, ## __VA_ARGS__);}}
+#define DBGLOG_FORCE(force, format, ...) {if (force) {DBGLOG_FUNCTION(format,##__VA_ARGS__);}}
diff --git a/cores/esp8266/umm_malloc/umm_heap_select.h b/cores/esp8266/umm_malloc/umm_heap_select.h
index a329bf103c..282e87b8ff 100644
--- a/cores/esp8266/umm_malloc/umm_heap_select.h
+++ b/cores/esp8266/umm_malloc/umm_heap_select.h
@@ -32,70 +32,77 @@
 class HeapSelect {
 public:
 #if (UMM_NUM_HEAPS == 1)
-  MAYBE_ALWAYS_INLINE
-  HeapSelect(size_t id) { (void)id; }
-  MAYBE_ALWAYS_INLINE
-  ~HeapSelect() {}
+MAYBE_ALWAYS_INLINE
+HeapSelect(size_t id) {
+    (void)id;
+}
+MAYBE_ALWAYS_INLINE
+~HeapSelect() {
+}
 #else
-  MAYBE_ALWAYS_INLINE
-  HeapSelect(size_t id) : _heap_id(umm_get_current_heap_id()) {
+MAYBE_ALWAYS_INLINE
+HeapSelect(size_t id) : _heap_id(umm_get_current_heap_id()) {
     umm_set_heap_by_id(id);
-  }
+}
 
-  MAYBE_ALWAYS_INLINE
-  ~HeapSelect() {
+MAYBE_ALWAYS_INLINE
+~HeapSelect() {
     umm_set_heap_by_id(_heap_id);
-  }
+}
 
 protected:
-    size_t _heap_id;
+size_t _heap_id;
 #endif
 };
 
 class HeapSelectIram {
 public:
 #ifdef UMM_HEAP_IRAM
-  MAYBE_ALWAYS_INLINE
-  HeapSelectIram() : _heap_id(umm_get_current_heap_id()) {
+MAYBE_ALWAYS_INLINE
+HeapSelectIram() : _heap_id(umm_get_current_heap_id()) {
     umm_set_heap_by_id(UMM_HEAP_IRAM);
-  }
+}
 
-  MAYBE_ALWAYS_INLINE
-  ~HeapSelectIram() {
+MAYBE_ALWAYS_INLINE
+~HeapSelectIram() {
     umm_set_heap_by_id(_heap_id);
-  }
+}
 
 protected:
-    size_t _heap_id;
+size_t _heap_id;
 
 #else
-  MAYBE_ALWAYS_INLINE
-  HeapSelectIram() {}
-  MAYBE_ALWAYS_INLINE
-  ~HeapSelectIram() {}
+MAYBE_ALWAYS_INLINE
+HeapSelectIram() {
+}
+MAYBE_ALWAYS_INLINE
+~HeapSelectIram() {
+}
 #endif
 };
 
 class HeapSelectDram {
 public:
 #if (UMM_NUM_HEAPS == 1)
-  MAYBE_ALWAYS_INLINE
-  HeapSelectDram() {}
-  MAYBE_ALWAYS_INLINE
-  ~HeapSelectDram() {}
+MAYBE_ALWAYS_INLINE
+HeapSelectDram() {
+}
+MAYBE_ALWAYS_INLINE
+~HeapSelectDram() {
+}
 #else
-  MAYBE_ALWAYS_INLINE
-  HeapSelectDram() : _heap_id(umm_get_current_heap_id()) {
+MAYBE_ALWAYS_INLINE
+HeapSelectDram() : _heap_id(umm_get_current_heap_id()) {
     umm_set_heap_by_id(UMM_HEAP_DRAM);
-  }
+}
 
-  MAYBE_ALWAYS_INLINE
-  ~HeapSelectDram() {
+MAYBE_ALWAYS_INLINE
+~HeapSelectDram() {
     umm_set_heap_by_id(_heap_id);
-  }
+}
 
 protected:
-    size_t _heap_id;
+size_t _heap_id;
 #endif
 };
 
diff --git a/cores/esp8266/umm_malloc/umm_info.c b/cores/esp8266/umm_malloc/umm_info.c
index b88e013b67..bd3280baed 100644
--- a/cores/esp8266/umm_malloc/umm_info.c
+++ b/cores/esp8266/umm_malloc/umm_info.c
@@ -25,174 +25,174 @@
 
 // UMM_HEAP_INFO ummHeapInfo;
 
-void *umm_info( void *ptr, bool force ) {
-  UMM_CRITICAL_DECL(id_info);
+void *umm_info(void *ptr, bool force) {
+    UMM_CRITICAL_DECL(id_info);
 
-  UMM_INIT_HEAP;
+    UMM_INIT_HEAP;
 
-  uint16_t blockNo = 0;
+    uint16_t blockNo = 0;
 
-  /* Protect the critical section... */
-  UMM_CRITICAL_ENTRY(id_info);
+    /* Protect the critical section... */
+    UMM_CRITICAL_ENTRY(id_info);
 
-  umm_heap_context_t *_context = umm_get_current_heap();
+    umm_heap_context_t *_context = umm_get_current_heap();
 
-  /*
-   * Clear out all of the entries in the ummHeapInfo structure before doing
-   * any calculations..
-   */
-  memset( &_context->info, 0, sizeof( _context->info ) );
+    /*
+     * Clear out all of the entries in the ummHeapInfo structure before doing
+     * any calculations..
+     */
+    memset(&_context->info, 0, sizeof(_context->info));
 
-  DBGLOG_FORCE( force, "\n" );
-  DBGLOG_FORCE( force, "+----------+-------+--------+--------+-------+--------+--------+\n" );
-  DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
-      DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
-      blockNo,
-      UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
-      UMM_PBLOCK(blockNo),
-      (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK )-blockNo,
-      UMM_NFREE(blockNo),
-      UMM_PFREE(blockNo) );
+    DBGLOG_FORCE(force, "\n");
+    DBGLOG_FORCE(force, "+----------+-------+--------+--------+-------+--------+--------+\n");
+    DBGLOG_FORCE(force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
+        DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
+        blockNo,
+        UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
+        UMM_PBLOCK(blockNo),
+        (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK) - blockNo,
+        UMM_NFREE(blockNo),
+        UMM_PFREE(blockNo));
 
-  /*
-   * Now loop through the block lists, and keep track of the number and size
-   * of used and free blocks. The terminating condition is an nb pointer with
-   * a value of zero...
-   */
+    /*
+     * Now loop through the block lists, and keep track of the number and size
+     * of used and free blocks. The terminating condition is an nb pointer with
+     * a value of zero...
+     */
 
-  blockNo = UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK;
+    blockNo = UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK;
 
-  while( UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK ) {
-    size_t curBlocks = (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK )-blockNo;
+    while (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK) {
+        size_t curBlocks = (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK) - blockNo;
 
-    ++_context->info.totalEntries;
-    _context->info.totalBlocks += curBlocks;
+        ++_context->info.totalEntries;
+        _context->info.totalBlocks += curBlocks;
 
-    /* Is this a free block? */
+        /* Is this a free block? */
 
-    if( UMM_NBLOCK(blockNo) & UMM_FREELIST_MASK ) {
-      ++_context->info.freeEntries;
-      _context->info.freeBlocks += curBlocks;
-      _context->info.freeBlocksSquared += (curBlocks * curBlocks);
+        if (UMM_NBLOCK(blockNo) & UMM_FREELIST_MASK) {
+            ++_context->info.freeEntries;
+            _context->info.freeBlocks += curBlocks;
+            _context->info.freeBlocksSquared += (curBlocks * curBlocks);
 
-      if (_context->info.maxFreeContiguousBlocks < curBlocks) {
-        _context->info.maxFreeContiguousBlocks = curBlocks;
-      }
+            if (_context->info.maxFreeContiguousBlocks < curBlocks) {
+                _context->info.maxFreeContiguousBlocks = curBlocks;
+            }
 
-      DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5u|NF %5d|PF %5d|\n",
-          DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
-          blockNo,
-          UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
-          UMM_PBLOCK(blockNo),
-          (uint16_t)curBlocks,
-          UMM_NFREE(blockNo),
-          UMM_PFREE(blockNo) );
+            DBGLOG_FORCE(force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5u|NF %5d|PF %5d|\n",
+                DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
+                blockNo,
+                UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
+                UMM_PBLOCK(blockNo),
+                (uint16_t)curBlocks,
+                UMM_NFREE(blockNo),
+                UMM_PFREE(blockNo));
 
-      /* Does this block address match the ptr we may be trying to free? */
+            /* Does this block address match the ptr we may be trying to free? */
 
-      if( ptr == &UMM_BLOCK(blockNo) ) {
+            if (ptr == &UMM_BLOCK(blockNo)) {
 
-        /* Release the critical section... */
-        UMM_CRITICAL_EXIT(id_info);
+                /* Release the critical section... */
+                UMM_CRITICAL_EXIT(id_info);
 
-        return( ptr );
-      }
-    } else {
-      ++_context->info.usedEntries;
-      _context->info.usedBlocks += curBlocks;
-
-      DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5u|\n",
-          DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
-          blockNo,
-          UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
-          UMM_PBLOCK(blockNo),
-          (uint16_t)curBlocks );
+                return ptr;
+            }
+        } else {
+            ++_context->info.usedEntries;
+            _context->info.usedBlocks += curBlocks;
+
+            DBGLOG_FORCE(force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5u|\n",
+                DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
+                blockNo,
+                UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
+                UMM_PBLOCK(blockNo),
+                (uint16_t)curBlocks);
+        }
+
+        blockNo = UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK;
     }
 
-    blockNo = UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK;
-  }
-
-  /*
-   * The very last block is used as a placeholder to indicate that
-   * there are no more blocks in the heap, so it cannot be used
-   * for anything - at the same time, the size of this block must
-   * ALWAYS be exactly 1 !
-   */
-
-  DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
-      DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
-      blockNo,
-      UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
-      UMM_PBLOCK(blockNo),
-      UMM_NUMBLOCKS-blockNo,
-      UMM_NFREE(blockNo),
-      UMM_PFREE(blockNo) );
-
-  DBGLOG_FORCE( force, "+----------+-------+--------+--------+-------+--------+--------+\n" );
-
-  DBGLOG_FORCE( force, "Total Entries %5d    Used Entries %5d    Free Entries %5d\n",
-      _context->info.totalEntries,
-      _context->info.usedEntries,
-      _context->info.freeEntries );
-
-  DBGLOG_FORCE( force, "Total Blocks  %5d    Used Blocks  %5d    Free Blocks  %5d\n",
-      _context->info.totalBlocks,
-      _context->info.usedBlocks,
-      _context->info.freeBlocks  );
-
-  DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
-
-  DBGLOG_FORCE( force, "Usage Metric:               %5d\n", umm_usage_metric_core(_context));
-  DBGLOG_FORCE( force, "Fragmentation Metric:       %5d\n", umm_fragmentation_metric_core(_context));
-
-  DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
-
-#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
-#if !defined(UMM_INLINE_METRICS)
-  if (_context->info.freeBlocks == _context->stats.free_blocks) {
-      DBGLOG_FORCE( force, "heap info Free blocks and heap statistics Free blocks match.\n");
-  } else {
-      DBGLOG_FORCE( force, "\nheap info Free blocks  %5d != heap statistics Free Blocks  %5d\n\n",
-          _context->info.freeBlocks,
-          _context->stats.free_blocks  );
-  }
-  DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
-#endif
+    /*
+     * The very last block is used as a placeholder to indicate that
+     * there are no more blocks in the heap, so it cannot be used
+     * for anything - at the same time, the size of this block must
+     * ALWAYS be exactly 1 !
+     */
+
+    DBGLOG_FORCE(force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
+        DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
+        blockNo,
+        UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
+        UMM_PBLOCK(blockNo),
+        UMM_NUMBLOCKS - blockNo,
+        UMM_NFREE(blockNo),
+        UMM_PFREE(blockNo));
+
+    DBGLOG_FORCE(force, "+----------+-------+--------+--------+-------+--------+--------+\n");
+
+    DBGLOG_FORCE(force, "Total Entries %5d    Used Entries %5d    Free Entries %5d\n",
+        _context->info.totalEntries,
+        _context->info.usedEntries,
+        _context->info.freeEntries);
+
+    DBGLOG_FORCE(force, "Total Blocks  %5d    Used Blocks  %5d    Free Blocks  %5d\n",
+        _context->info.totalBlocks,
+        _context->info.usedBlocks,
+        _context->info.freeBlocks);
+
+    DBGLOG_FORCE(force, "+--------------------------------------------------------------+\n");
+
+    DBGLOG_FORCE(force, "Usage Metric:               %5d\n", umm_usage_metric_core(_context));
+    DBGLOG_FORCE(force, "Fragmentation Metric:       %5d\n", umm_fragmentation_metric_core(_context));
+
+    DBGLOG_FORCE(force, "+--------------------------------------------------------------+\n");
+
+    #if defined(UMM_STATS) || defined(UMM_STATS_FULL)
+    #if !defined(UMM_INLINE_METRICS)
+    if (_context->info.freeBlocks == _context->stats.free_blocks) {
+        DBGLOG_FORCE(force, "heap info Free blocks and heap statistics Free blocks match.\n");
+    } else {
+        DBGLOG_FORCE(force, "\nheap info Free blocks  %5d != heap statistics Free Blocks  %5d\n\n",
+            _context->info.freeBlocks,
+            _context->stats.free_blocks);
+    }
+    DBGLOG_FORCE(force, "+--------------------------------------------------------------+\n");
+    #endif
 
-  umm_print_stats(force);
-#endif
+    umm_print_stats(force);
+    #endif
 
-  /* Release the critical section... */
-  UMM_CRITICAL_EXIT(id_info);
+    /* Release the critical section... */
+    UMM_CRITICAL_EXIT(id_info);
 
-  return( NULL );
+    return NULL;
 }
 
 /* ------------------------------------------------------------------------ */
 
-size_t umm_free_heap_size_core( umm_heap_context_t *_context ) {
-  return (size_t)_context->info.freeBlocks * sizeof(umm_block);
+size_t umm_free_heap_size_core(umm_heap_context_t *_context) {
+    return (size_t)_context->info.freeBlocks * sizeof(umm_block);
 }
 
-size_t umm_free_heap_size( void ) {
-#ifndef UMM_INLINE_METRICS
-  umm_info(NULL, false);
-#endif
+size_t umm_free_heap_size(void) {
+    #ifndef UMM_INLINE_METRICS
+    umm_info(NULL, false);
+    #endif
 
-  return umm_free_heap_size_core(umm_get_current_heap());
+    return umm_free_heap_size_core(umm_get_current_heap());
 }
 
-//C Breaking change in upstream umm_max_block_size() was changed to
-//C umm_max_free_block_size() keeping old function name for (dot) releases.
-//C TODO: update at next major release.
-//C size_t umm_max_free_block_size( void ) {
-size_t umm_max_block_size_core( umm_heap_context_t *_context ) {
-  return _context->info.maxFreeContiguousBlocks * sizeof(umm_block);
+// C Breaking change in upstream umm_max_block_size() was changed to
+// C umm_max_free_block_size() keeping old function name for (dot) releases.
+// C TODO: update at next major release.
+// C size_t umm_max_free_block_size( void ) {
+size_t umm_max_block_size_core(umm_heap_context_t *_context) {
+    return _context->info.maxFreeContiguousBlocks * sizeof(umm_block);
 }
 
-size_t umm_max_block_size( void ) {
-  umm_info(NULL, false);
-  return umm_max_block_size_core(umm_get_current_heap());
+size_t umm_max_block_size(void) {
+    umm_info(NULL, false);
+    return umm_max_block_size_core(umm_get_current_heap());
 }
 
 /*
@@ -200,60 +200,61 @@ size_t umm_max_block_size( void ) {
-  umm_fragmentation_metric() must to be preceded by a call to umm_info(NULL, false)
+  umm_fragmentation_metric() must be preceded by a call to umm_info(NULL, false)
   for updated results.
 */
-int umm_usage_metric_core( umm_heap_context_t *_context ) {
-//C Note, umm_metrics also appears in the upstrean w/o definition. I suspect it is suppose to be ummHeapInfo.
-  // DBGLOG_DEBUG( "usedBlocks %d totalBlocks %d\n", umm_metrics.usedBlocks, ummHeapInfo.totalBlocks);
-  DBGLOG_DEBUG( "usedBlocks %d totalBlocks %d\n", _context->info.usedBlocks, _context->info.totalBlocks);
-  if (_context->info.freeBlocks)
-    return (int)((_context->info.usedBlocks * 100)/(_context->info.freeBlocks));
-
-  return -1;  // no freeBlocks
+int umm_usage_metric_core(umm_heap_context_t *_context) {
+// C Note, umm_metrics also appears in the upstream w/o definition. I suspect it is supposed to be ummHeapInfo.
+    // DBGLOG_DEBUG( "usedBlocks %d totalBlocks %d\n", umm_metrics.usedBlocks, ummHeapInfo.totalBlocks);
+    DBGLOG_DEBUG("usedBlocks %d totalBlocks %d\n", _context->info.usedBlocks, _context->info.totalBlocks);
+    if (_context->info.freeBlocks) {
+        return (int)((_context->info.usedBlocks * 100) / (_context->info.freeBlocks));
+    }
+
+    return -1; // no freeBlocks
 }
 
-int umm_usage_metric( void ) {
-#ifndef UMM_INLINE_METRICS
-  umm_info(NULL, false);
-#endif
+int umm_usage_metric(void) {
+    #ifndef UMM_INLINE_METRICS
+    umm_info(NULL, false);
+    #endif
 
-  return umm_usage_metric_core(umm_get_current_heap());
+    return umm_usage_metric_core(umm_get_current_heap());
 }
-uint32_t sqrt32 (uint32_t n);
-
-int umm_fragmentation_metric_core( umm_heap_context_t *_context ) {
-  // DBGLOG_DEBUG( "freeBlocks %d freeBlocksSquared %d\n", umm_metrics.freeBlocks, ummHeapInfo.freeBlocksSquared);
-  DBGLOG_DEBUG( "freeBlocks %d freeBlocksSquared %d\n", _context->info.freeBlocks, _context->info.freeBlocksSquared);
-  if (0 == _context->info.freeBlocks) {
-      return 0;
-  } else {
-      //upstream version: return (100 - (((uint32_t)(sqrtf(ummHeapInfo.freeBlocksSquared)) * 100)/(ummHeapInfo.freeBlocks)));
-      return (100 - (((uint32_t)(sqrt32(_context->info.freeBlocksSquared)) * 100)/(_context->info.freeBlocks)));
-  }
+uint32_t sqrt32(uint32_t n);
+
+int umm_fragmentation_metric_core(umm_heap_context_t *_context) {
+    // DBGLOG_DEBUG( "freeBlocks %d freeBlocksSquared %d\n", umm_metrics.freeBlocks, ummHeapInfo.freeBlocksSquared);
+    DBGLOG_DEBUG("freeBlocks %d freeBlocksSquared %d\n", _context->info.freeBlocks, _context->info.freeBlocksSquared);
+    if (0 == _context->info.freeBlocks) {
+        return 0;
+    } else {
+        // upstream version: return (100 - (((uint32_t)(sqrtf(ummHeapInfo.freeBlocksSquared)) * 100)/(ummHeapInfo.freeBlocks)));
+        return 100 - (((uint32_t)(sqrt32(_context->info.freeBlocksSquared)) * 100) / (_context->info.freeBlocks));
+    }
 }
 
-int umm_fragmentation_metric( void ) {
-#ifndef UMM_INLINE_METRICS
-  umm_info(NULL, false);
-#endif
+int umm_fragmentation_metric(void) {
+    #ifndef UMM_INLINE_METRICS
+    umm_info(NULL, false);
+    #endif
 
-  return umm_fragmentation_metric_core(umm_get_current_heap());
+    return umm_fragmentation_metric_core(umm_get_current_heap());
 }
 
 #ifdef UMM_INLINE_METRICS
-static void umm_fragmentation_metric_init( umm_heap_context_t *_context ) {
+static void umm_fragmentation_metric_init(umm_heap_context_t *_context) {
     _context->info.freeBlocks = UMM_NUMBLOCKS - 2;
     _context->info.freeBlocksSquared = _context->info.freeBlocks * _context->info.freeBlocks;
 }
 
-static void umm_fragmentation_metric_add( umm_heap_context_t *_context, uint16_t c ) {
+static void umm_fragmentation_metric_add(umm_heap_context_t *_context, uint16_t c) {
     uint16_t blocks = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) - c;
-    DBGLOG_DEBUG( "Add block %d size %d to free metric\n", c, blocks);
+    DBGLOG_DEBUG("Add block %d size %d to free metric\n", c, blocks);
     _context->info.freeBlocks += blocks;
     _context->info.freeBlocksSquared += (blocks * blocks);
 }
 
-static void umm_fragmentation_metric_remove( umm_heap_context_t *_context, uint16_t c ) {
+static void umm_fragmentation_metric_remove(umm_heap_context_t *_context, uint16_t c) {
     uint16_t blocks = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) - c;
-    DBGLOG_DEBUG( "Remove block %d size %d from free metric\n", c, blocks);
+    DBGLOG_DEBUG("Remove block %d size %d from free metric\n", c, blocks);
     _context->info.freeBlocks -= blocks;
     _context->info.freeBlocksSquared -= (blocks * blocks);
 }
diff --git a/cores/esp8266/umm_malloc/umm_integrity.c b/cores/esp8266/umm_malloc/umm_integrity.c
index bb68c52f4d..c66ec3bb00 100644
--- a/cores/esp8266/umm_malloc/umm_integrity.c
+++ b/cores/esp8266/umm_malloc/umm_integrity.c
@@ -28,109 +28,108 @@
  * chain.
  */
 bool umm_integrity_check(void) {
-  UMM_CRITICAL_DECL(id_integrity);
-  bool ok = true;
-  uint16_t prev;
-  uint16_t cur;
-
-  UMM_INIT_HEAP;
-
-  /* Iterate through all free blocks */
-  prev = 0;
-  UMM_CRITICAL_ENTRY(id_integrity);
-
-  umm_heap_context_t *_context = umm_get_current_heap();
-
-  while(1) {
-    cur = UMM_NFREE(prev);
-
-    /* Check that next free block number is valid */
-    if (cur >= UMM_NUMBLOCKS) {
-      DBGLOG_FUNCTION("heap integrity broken: too large next free num: %d "
-          "(in block %d, addr 0x%08x)\n", cur, prev,
-          DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev)));
-      ok = false;
-      goto clean;
+    UMM_CRITICAL_DECL(id_integrity);
+    bool ok = true;
+    uint16_t prev;
+    uint16_t cur;
+
+    UMM_INIT_HEAP;
+
+    /* Iterate through all free blocks */
+    prev = 0;
+    UMM_CRITICAL_ENTRY(id_integrity);
+
+    umm_heap_context_t *_context = umm_get_current_heap();
+
+    while (1) {
+        cur = UMM_NFREE(prev);
+
+        /* Check that next free block number is valid */
+        if (cur >= UMM_NUMBLOCKS) {
+            DBGLOG_FUNCTION("heap integrity broken: too large next free num: %d "
+                "(in block %d, addr 0x%08x)\n", cur, prev,
+                DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev)));
+            ok = false;
+            goto clean;
+        }
+        if (cur == 0) {
+            /* No more free blocks */
+            break;
+        }
+
+        /* Check if prev free block number matches */
+        if (UMM_PFREE(cur) != prev) {
+            DBGLOG_FUNCTION("heap integrity broken: free links don't match: "
+                "%d -> %d, but %d -> %d\n",
+                prev, cur, cur, UMM_PFREE(cur));
+            ok = false;
+            goto clean;
+        }
+
+        UMM_PBLOCK(cur) |= UMM_FREELIST_MASK;
+
+        prev = cur;
     }
-    if (cur == 0) {
-      /* No more free blocks */
-      break;
-    }
-
-    /* Check if prev free block number matches */
-    if (UMM_PFREE(cur) != prev) {
-      DBGLOG_FUNCTION("heap integrity broken: free links don't match: "
-          "%d -> %d, but %d -> %d\n",
-          prev, cur, cur, UMM_PFREE(cur));
-      ok = false;
-      goto clean;
-    }
-
-    UMM_PBLOCK(cur) |= UMM_FREELIST_MASK;
-
-    prev = cur;
-  }
 
-  /* Iterate through all blocks */
-  prev = 0;
-  while(1) {
-    cur = UMM_NBLOCK(prev) & UMM_BLOCKNO_MASK;
-
-    /* Check that next block number is valid */
-    if (cur >= UMM_NUMBLOCKS) {
-      DBGLOG_FUNCTION("heap integrity broken: too large next block num: %d "
-          "(in block %d, addr 0x%08x)\n", cur, prev,
-          DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev)));
-      ok = false;
-      goto clean;
-    }
-    if (cur == 0) {
-      /* No more blocks */
-      break;
-    }
-
-    /* make sure the free mark is appropriate, and unmark it */
-    if ((UMM_NBLOCK(cur) & UMM_FREELIST_MASK)
-        != (UMM_PBLOCK(cur) & UMM_FREELIST_MASK))
-    {
-      DBGLOG_FUNCTION("heap integrity broken: mask wrong at addr 0x%08x: n=0x%x, p=0x%x\n",
-          DBGLOG_32_BIT_PTR(&UMM_NBLOCK(cur)),
-          (UMM_NBLOCK(cur) & UMM_FREELIST_MASK),
-          (UMM_PBLOCK(cur) & UMM_FREELIST_MASK));
-      ok = false;
-      goto clean;
-    }
-
-    /* make sure the block list is sequential */
-    if (cur <= prev ) {
-     DBGLOG_FUNCTION("heap integrity broken: next block %d is before prev this one "
-          "(in block %d, addr 0x%08x)\n", cur, prev,
-          DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev)));
-      ok = false;
-      goto clean;
-    }
+    /* Iterate through all blocks */
+    prev = 0;
+    while (1) {
+        cur = UMM_NBLOCK(prev) & UMM_BLOCKNO_MASK;
+
+        /* Check that next block number is valid */
+        if (cur >= UMM_NUMBLOCKS) {
+            DBGLOG_FUNCTION("heap integrity broken: too large next block num: %d "
+                "(in block %d, addr 0x%08x)\n", cur, prev,
+                DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev)));
+            ok = false;
+            goto clean;
+        }
+        if (cur == 0) {
+            /* No more blocks */
+            break;
+        }
+
+        /* make sure the free mark is appropriate, and unmark it */
+        if ((UMM_NBLOCK(cur) & UMM_FREELIST_MASK)
+            != (UMM_PBLOCK(cur) & UMM_FREELIST_MASK)) {
+            DBGLOG_FUNCTION("heap integrity broken: mask wrong at addr 0x%08x: n=0x%x, p=0x%x\n",
+                DBGLOG_32_BIT_PTR(&UMM_NBLOCK(cur)),
+                (UMM_NBLOCK(cur) & UMM_FREELIST_MASK),
+                (UMM_PBLOCK(cur) & UMM_FREELIST_MASK));
+            ok = false;
+            goto clean;
+        }
+
+        /* make sure the block list is sequential */
+        if (cur <= prev) {
+            DBGLOG_FUNCTION("heap integrity broken: next block %d is before prev this one "
+                "(in block %d, addr 0x%08x)\n", cur, prev,
+                DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev)));
+            ok = false;
+            goto clean;
+        }
 
 /* unmark */
-    UMM_PBLOCK(cur) &= UMM_BLOCKNO_MASK;
-
-    /* Check if prev block number matches */
-    if (UMM_PBLOCK(cur) != prev) {
-      DBGLOG_FUNCTION("heap integrity broken: block links don't match: "
-          "%d -> %d, but %d -> %d\n",
-          prev, cur, cur, UMM_PBLOCK(cur));
-      ok = false;
-      goto clean;
+        UMM_PBLOCK(cur) &= UMM_BLOCKNO_MASK;
+
+        /* Check if prev block number matches */
+        if (UMM_PBLOCK(cur) != prev) {
+            DBGLOG_FUNCTION("heap integrity broken: block links don't match: "
+                "%d -> %d, but %d -> %d\n",
+                prev, cur, cur, UMM_PBLOCK(cur));
+            ok = false;
+            goto clean;
+        }
+
+        prev = cur;
     }
 
-    prev = cur;
-  }
-
 clean:
-  UMM_CRITICAL_EXIT(id_integrity);
-  if (!ok){
-    UMM_HEAP_CORRUPTION_CB();
-  }
-  return ok;
+    UMM_CRITICAL_EXIT(id_integrity);
+    if (!ok) {
+        UMM_HEAP_CORRUPTION_CB();
+    }
+    return ok;
 }
 
 #endif
diff --git a/cores/esp8266/umm_malloc/umm_local.c b/cores/esp8266/umm_malloc/umm_local.c
index c1169a8a8f..7d8bf7e7e6 100644
--- a/cores/esp8266/umm_malloc/umm_local.c
+++ b/cores/esp8266/umm_malloc/umm_local.c
@@ -12,22 +12,21 @@ UMM_TIME_STATS time_stats = {
     {0xFFFFFFFF, 0U, 0U, 0U},
     {0xFFFFFFFF, 0U, 0U, 0U},
     {0xFFFFFFFF, 0U, 0U, 0U},
-#ifdef UMM_INFO
+    #ifdef UMM_INFO
     {0xFFFFFFFF, 0U, 0U, 0U},
-#endif
-#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
+    #endif
+    #if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
     {0xFFFFFFFF, 0U, 0U, 0U},
-#endif
-#ifdef UMM_INTEGRITY_CHECK
+    #endif
+    #ifdef UMM_INTEGRITY_CHECK
     {0xFFFFFFFF, 0U, 0U, 0U},
-#endif
-    {0xFFFFFFFF, 0U, 0U, 0U} };
+    #endif
+    {0xFFFFFFFF, 0U, 0U, 0U}
+};
 
-bool ICACHE_FLASH_ATTR get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size)
-{
+bool ICACHE_FLASH_ATTR get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size) {
     UMM_CRITICAL_DECL(id_no_tag);
-    if (p && sizeof(time_stats) == size)
-    {
+    if (p && sizeof(time_stats) == size) {
         UMM_CRITICAL_ENTRY(id_no_tag);
         memcpy(p, &time_stats, size);
         UMM_CRITICAL_EXIT(id_no_tag);
@@ -42,42 +41,45 @@ bool ICACHE_FLASH_ATTR get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size)
 #if defined(UMM_POISON_CHECK_LITE)
 // We skip this when doing the full poison check.
 
-static bool check_poison_neighbors( umm_heap_context_t *_context, uint16_t cur ) {
-  uint16_t c;
+static bool check_poison_neighbors(umm_heap_context_t *_context, uint16_t cur) {
+    uint16_t c;
 
-  if ( 0 == cur )
-    return true;
+    if (0 == cur) {
+        return true;
+    }
 
-  c = UMM_PBLOCK(cur) & UMM_BLOCKNO_MASK;
-  while( c && (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) ) {
-    /*
-       There can be up to 1 free block neighbor in either direction.
-       This loop should self limit to 2 passes, due to heap design.
-       i.e. Adjacent free space is always consolidated.
-     */
-    if ( !(UMM_NBLOCK(c) & UMM_FREELIST_MASK) ) {
-      if ( !check_poison_block(&UMM_BLOCK(c)) )
-        return false;
-
-      break;
+    c = UMM_PBLOCK(cur) & UMM_BLOCKNO_MASK;
+    while (c && (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK)) {
+        /*
+           There can be up to 1 free block neighbor in either direction.
+           This loop should self limit to 2 passes, due to heap design.
+           i.e. Adjacent free space is always consolidated.
+         */
+        if (!(UMM_NBLOCK(c) & UMM_FREELIST_MASK)) {
+            if (!check_poison_block(&UMM_BLOCK(c))) {
+                return false;
+            }
+
+            break;
+        }
+
+        c = UMM_PBLOCK(c) & UMM_BLOCKNO_MASK;
     }
 
-    c = UMM_PBLOCK(c) & UMM_BLOCKNO_MASK;
-  }
+    c = UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK;
+    while ((UMM_NBLOCK(c) & UMM_BLOCKNO_MASK)) {
+        if (!(UMM_NBLOCK(c) & UMM_FREELIST_MASK)) {
+            if (!check_poison_block(&UMM_BLOCK(c))) {
+                return false;
+            }
 
-  c = UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK;
-  while( (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) ) {
-    if ( !(UMM_NBLOCK(c) & UMM_FREELIST_MASK) ) {
-      if ( !check_poison_block(&UMM_BLOCK(c)) )
-        return false;
+            break;
+        }
 
-      break;
+        c = UMM_NBLOCK(c) & UMM_BLOCKNO_MASK;
     }
 
-    c = UMM_NBLOCK(c) & UMM_BLOCKNO_MASK;
-  }
-
-  return true;
+    return true;
 }
 #endif
 
@@ -85,52 +87,52 @@ static bool check_poison_neighbors( umm_heap_context_t *_context, uint16_t cur )
 
 /* ------------------------------------------------------------------------ */
 
-static void *get_unpoisoned_check_neighbors( void *vptr, const char* file, int line ) {
-  uintptr_t ptr = (uintptr_t)vptr;
-
-  if (ptr != 0) {
-
-    ptr -= (sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE);
-
-#if defined(UMM_POISON_CHECK_LITE)
-    UMM_CRITICAL_DECL(id_poison);
-    uint16_t c;
-    bool poison = false;
-    umm_heap_context_t *_context = umm_get_ptr_context( vptr );
-    if (NULL == _context) {
-      panic();
-      return NULL;
+static void *get_unpoisoned_check_neighbors(void *vptr, const char *file, int line) {
+    uintptr_t ptr = (uintptr_t)vptr;
+
+    if (ptr != 0) {
+
+        ptr -= (sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE);
+
+        #if defined(UMM_POISON_CHECK_LITE)
+        UMM_CRITICAL_DECL(id_poison);
+        uint16_t c;
+        bool poison = false;
+        umm_heap_context_t *_context = umm_get_ptr_context(vptr);
+        if (NULL == _context) {
+            panic();
+            return NULL;
+        }
+        /* Figure out which block we're in. Note the use of truncated division... */
+        c = (ptr - (uintptr_t)(&(_context->heap[0]))) / sizeof(umm_block);
+
+        UMM_CRITICAL_ENTRY(id_poison);
+        poison = check_poison_block(&UMM_BLOCK(c)) && check_poison_neighbors(_context, c);
+        UMM_CRITICAL_EXIT(id_poison);
+
+        if (!poison) {
+            if (file) {
+                __panic_func(file, line, "");
+            } else {
+                abort();
+            }
+        }
+        #else
+        /*
+         *  No need to check poison here. POISON_CHECK() has already done a
+         *  full heap check.
+         */
+        (void)file;
+        (void)line;
+        #endif
     }
-    /* Figure out which block we're in. Note the use of truncated division... */
-    c = (ptr - (uintptr_t)(&(_context->heap[0])))/sizeof(umm_block);
-
-    UMM_CRITICAL_ENTRY(id_poison);
-    poison = check_poison_block(&UMM_BLOCK(c)) && check_poison_neighbors(_context, c);
-    UMM_CRITICAL_EXIT(id_poison);
-
-    if (!poison) {
-      if (file) {
-        __panic_func(file, line, "");
-      } else {
-        abort();
-      }
-    }
-#else
-    /*
-     *  No need to check poison here. POISON_CHECK() has already done a
-     *  full heap check.
-     */
-    (void)file;
-    (void)line;
-#endif
-  }
 
-  return (void *)ptr;
+    return (void *)ptr;
 }
 
 /* ------------------------------------------------------------------------ */
 
-void *umm_poison_realloc_fl(void *ptr, size_t size, const char* file, int line) {
+void *umm_poison_realloc_fl(void *ptr, size_t size, const char *file, int line) {
     void *ret;
 
     ptr = get_unpoisoned_check_neighbors(ptr, file, line);
@@ -145,7 +147,7 @@ void *umm_poison_realloc_fl(void *ptr, size_t size, const char* file, int line)
 
 /* ------------------------------------------------------------------------ */
 
-void umm_poison_free_fl(void *ptr, const char* file, int line) {
+void umm_poison_free_fl(void *ptr, const char *file, int line) {
 
     ptr = get_unpoisoned_check_neighbors(ptr, file, line);
 
@@ -156,18 +158,18 @@ void umm_poison_free_fl(void *ptr, const char* file, int line) {
 /* ------------------------------------------------------------------------ */
 
 #if defined(UMM_STATS) || defined(UMM_STATS_FULL) || defined(UMM_INFO)
-size_t umm_block_size( void ) {
-  return sizeof(umm_block);
+size_t umm_block_size(void) {
+    return sizeof(umm_block);
 }
 #endif
 
 #if defined(UMM_STATS) || defined(UMM_STATS_FULL)
 // Keep complete call path in IRAM
-size_t umm_free_heap_size_lw( void ) {
-  UMM_INIT_HEAP;
+size_t umm_free_heap_size_lw(void) {
+    UMM_INIT_HEAP;
 
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return (size_t)_context->UMM_FREE_BLOCKS * sizeof(umm_block);
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return (size_t)_context->UMM_FREE_BLOCKS * sizeof(umm_block);
 }
 #endif
 
@@ -187,19 +189,19 @@ size_t xPortGetFreeHeapSize(void) __attribute__ ((alias("umm_free_heap_size")));
 
 #if defined(UMM_STATS) || defined(UMM_STATS_FULL)
 void umm_print_stats(int force) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-
-  DBGLOG_FORCE( force, "umm heap statistics:\n");
-  DBGLOG_FORCE( force,   "  Heap ID           %5u\n", _context->id);
-  DBGLOG_FORCE( force,   "  Free Space        %5u\n", _context->UMM_FREE_BLOCKS * sizeof(umm_block));
-  DBGLOG_FORCE( force,   "  OOM Count         %5u\n", _context->UMM_OOM_COUNT);
-#if defined(UMM_STATS_FULL)
-  DBGLOG_FORCE( force,   "  Low Watermark     %5u\n", _context->stats.free_blocks_min * sizeof(umm_block));
-  DBGLOG_FORCE( force,   "  Low Watermark ISR %5u\n", _context->stats.free_blocks_isr_min * sizeof(umm_block));
-  DBGLOG_FORCE( force,   "  MAX Alloc Request %5u\n", _context->stats.alloc_max_size);
-#endif
-  DBGLOG_FORCE( force,   "  Size of umm_block %5u\n", sizeof(umm_block));
-  DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
+    umm_heap_context_t *_context = umm_get_current_heap();
+
+    DBGLOG_FORCE(force, "umm heap statistics:\n");
+    DBGLOG_FORCE(force,   "  Heap ID           %5u\n", _context->id);
+    DBGLOG_FORCE(force,   "  Free Space        %5u\n", _context->UMM_FREE_BLOCKS * sizeof(umm_block));
+    DBGLOG_FORCE(force,   "  OOM Count         %5u\n", _context->UMM_OOM_COUNT);
+    #if defined(UMM_STATS_FULL)
+    DBGLOG_FORCE(force,   "  Low Watermark     %5u\n", _context->stats.free_blocks_min * sizeof(umm_block));
+    DBGLOG_FORCE(force,   "  Low Watermark ISR %5u\n", _context->stats.free_blocks_isr_min * sizeof(umm_block));
+    DBGLOG_FORCE(force,   "  MAX Alloc Request %5u\n", _context->stats.alloc_max_size);
+    #endif
+    DBGLOG_FORCE(force,   "  Size of umm_block %5u\n", sizeof(umm_block));
+    DBGLOG_FORCE(force, "+--------------------------------------------------------------+\n");
 }
 #endif
 
@@ -214,9 +216,9 @@ int ICACHE_FLASH_ATTR umm_info_safe_printf_P(const char *fmt, ...) {
 }
 
 #if defined(UMM_STATS) || defined(UMM_STATS_FULL)
-size_t ICACHE_FLASH_ATTR umm_get_oom_count( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->UMM_OOM_COUNT;
+size_t ICACHE_FLASH_ATTR umm_get_oom_count(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->UMM_OOM_COUNT;
 }
 #endif
 
@@ -228,69 +230,69 @@ size_t ICACHE_FLASH_ATTR umm_get_oom_count( void ) {
 //
 // If this is correct use alias.
 //
-size_t ICACHE_FLASH_ATTR umm_free_heap_size_lw_min( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.free_blocks_min * umm_block_size();
+size_t ICACHE_FLASH_ATTR umm_free_heap_size_lw_min(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.free_blocks_min * umm_block_size();
 }
 
-size_t ICACHE_FLASH_ATTR umm_free_heap_size_min_reset( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  _context->stats.free_blocks_min = _context->UMM_FREE_BLOCKS;
-  return _context->stats.free_blocks_min * umm_block_size();
+size_t ICACHE_FLASH_ATTR umm_free_heap_size_min_reset(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    _context->stats.free_blocks_min = _context->UMM_FREE_BLOCKS;
+    return _context->stats.free_blocks_min * umm_block_size();
 }
 
 #if 0 // TODO - Don't understand this why do both umm_free_heap_size_(lw_)min exist
 size_t umm_free_heap_size_min(void) __attribute__ ((alias("umm_free_heap_size_lw_min")));
 #else
-size_t ICACHE_FLASH_ATTR umm_free_heap_size_min( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.free_blocks_min * umm_block_size();
+size_t ICACHE_FLASH_ATTR umm_free_heap_size_min(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.free_blocks_min * umm_block_size();
 }
 #endif
 
-size_t ICACHE_FLASH_ATTR umm_free_heap_size_isr_min( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.free_blocks_isr_min * umm_block_size();
+size_t ICACHE_FLASH_ATTR umm_free_heap_size_isr_min(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.free_blocks_isr_min * umm_block_size();
 }
 
-size_t ICACHE_FLASH_ATTR umm_get_max_alloc_size( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.alloc_max_size;
+size_t ICACHE_FLASH_ATTR umm_get_max_alloc_size(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.alloc_max_size;
 }
 
-size_t ICACHE_FLASH_ATTR umm_get_last_alloc_size( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.last_alloc_size;
+size_t ICACHE_FLASH_ATTR umm_get_last_alloc_size(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.last_alloc_size;
 }
 
-size_t ICACHE_FLASH_ATTR umm_get_malloc_count( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.id_malloc_count;
+size_t ICACHE_FLASH_ATTR umm_get_malloc_count(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.id_malloc_count;
 }
 
-size_t ICACHE_FLASH_ATTR umm_get_malloc_zero_count( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.id_malloc_zero_count;
+size_t ICACHE_FLASH_ATTR umm_get_malloc_zero_count(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.id_malloc_zero_count;
 }
 
-size_t ICACHE_FLASH_ATTR umm_get_realloc_count( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.id_realloc_count;
+size_t ICACHE_FLASH_ATTR umm_get_realloc_count(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.id_realloc_count;
 }
 
-size_t ICACHE_FLASH_ATTR umm_get_realloc_zero_count( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.id_realloc_zero_count;
+size_t ICACHE_FLASH_ATTR umm_get_realloc_zero_count(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.id_realloc_zero_count;
 }
 
-size_t ICACHE_FLASH_ATTR umm_get_free_count( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.id_free_count;
+size_t ICACHE_FLASH_ATTR umm_get_free_count(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.id_free_count;
 }
 
-size_t ICACHE_FLASH_ATTR umm_get_free_null_count( void ) {
-  umm_heap_context_t *_context = umm_get_current_heap();
-  return _context->stats.id_free_null_count;
+size_t ICACHE_FLASH_ATTR umm_get_free_null_count(void) {
+    umm_heap_context_t *_context = umm_get_current_heap();
+    return _context->stats.id_free_null_count;
 }
 #endif // UMM_STATS_FULL
 
diff --git a/cores/esp8266/umm_malloc/umm_local.h b/cores/esp8266/umm_malloc/umm_local.h
index f73b2a1832..a649780441 100644
--- a/cores/esp8266/umm_malloc/umm_local.h
+++ b/cores/esp8266/umm_malloc/umm_local.h
@@ -22,7 +22,7 @@
  * string while INTLEVEL is non-zero.
  */
 #undef DBGLOG_FORCE
-#define DBGLOG_FORCE(force, format, ...) {if(force) {UMM_INFO_PRINTF(format, ## __VA_ARGS__);}}
+#define DBGLOG_FORCE(force, format, ...) {if (force) {UMM_INFO_PRINTF(format,##__VA_ARGS__);}}
 // #define DBGLOG_FORCE(force, format, ...) {if(force) {::printf(PSTR(format), ## __VA_ARGS__);}}
 
 
@@ -37,7 +37,7 @@
 
 
 #if defined(UMM_POISON_CHECK_LITE)
-static bool check_poison_neighbors( umm_heap_context_t *_context, uint16_t cur );
+static bool check_poison_neighbors(umm_heap_context_t *_context, uint16_t cur);
 #endif
 
 
@@ -48,22 +48,22 @@ void ICACHE_FLASH_ATTR umm_print_stats(int force);
 
 
 int ICACHE_FLASH_ATTR umm_info_safe_printf_P(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
-#define UMM_INFO_PRINTF(fmt, ...) umm_info_safe_printf_P(PSTR(fmt), ##__VA_ARGS__)
+#define UMM_INFO_PRINTF(fmt, ...) umm_info_safe_printf_P(PSTR(fmt),##__VA_ARGS__)
 
 
 typedef struct umm_block_t umm_block;
 
 struct UMM_HEAP_CONTEXT {
-  umm_block *heap;
-  void *heap_end;
-#if (!defined(UMM_INLINE_METRICS) && defined(UMM_STATS)) || defined(UMM_STATS_FULL)
-  UMM_STATISTICS stats;
-#endif
-#ifdef UMM_INFO
-  UMM_HEAP_INFO info;
-#endif
-  unsigned short int numblocks;
-  unsigned char id;
+    umm_block *heap;
+    void *heap_end;
+    #if (!defined(UMM_INLINE_METRICS) && defined(UMM_STATS)) || defined(UMM_STATS_FULL)
+    UMM_STATISTICS stats;
+    #endif
+    #ifdef UMM_INFO
+    UMM_HEAP_INFO info;
+    #endif
+    unsigned short int numblocks;
+    unsigned char id;
 };
 
 
diff --git a/cores/esp8266/umm_malloc/umm_malloc.cpp b/cores/esp8266/umm_malloc/umm_malloc.cpp
index cfc55c8dba..694478cd2b 100644
--- a/cores/esp8266/umm_malloc/umm_malloc.cpp
+++ b/cores/esp8266/umm_malloc/umm_malloc.cpp
@@ -70,36 +70,36 @@ extern "C" {
 
 #include "dbglog/dbglog.h"
 
-//C This change is new in upstream umm_malloc.I think this would have created a
-//C breaking change. Keeping the old #define method in umm_malloc_cfg.h.
-//C I don't see a simple way of making it work. We would have to run code before
-//C the SDK has run to set a value for uint32_t UMM_MALLOC_CFG_HEAP_SIZE.
-//C On the other hand, a manual call to umm_init() before anything else has had a
-//C chance to run would mean that all those calls testing to see if the heap has
-//C been initialized at every umm_malloc API could be removed.
-//C
-//C before starting the NON OS SDK
-//C extern void *UMM_MALLOC_CFG_HEAP_ADDR;
-//C extern uint32_t UMM_MALLOC_CFG_HEAP_SIZE;
+// C This change is new in upstream umm_malloc. I think this would have created a
+// C breaking change. Keeping the old #define method in umm_malloc_cfg.h.
+// C I don't see a simple way of making it work. We would have to run code before
+// C the SDK has run to set a value for uint32_t UMM_MALLOC_CFG_HEAP_SIZE.
+// C On the other hand, a manual call to umm_init() before anything else has had a
+// C chance to run would mean that all those calls testing to see if the heap has
+// C been initialized at every umm_malloc API could be removed.
+// C
+// C before starting the NON OS SDK
+// C extern void *UMM_MALLOC_CFG_HEAP_ADDR;
+// C extern uint32_t UMM_MALLOC_CFG_HEAP_SIZE;
 
 #include "umm_local.h"      // target-dependent supplemental
 
 /* ------------------------------------------------------------------------- */
 
 UMM_H_ATTPACKPRE typedef struct umm_ptr_t {
-  uint16_t next;
-  uint16_t prev;
+    uint16_t next;
+    uint16_t prev;
 } UMM_H_ATTPACKSUF umm_ptr;
 
 
 UMM_H_ATTPACKPRE typedef struct umm_block_t {
-  union {
-    umm_ptr used;
-  } header;
-  union {
-    umm_ptr free;
-    uint8_t data[4];
-  } body;
+    union {
+        umm_ptr used;
+    } header;
+    union {
+        umm_ptr free;
+        uint8_t data[4];
+    } body;
 } UMM_H_ATTPACKSUF umm_block;
 
 #define UMM_FREELIST_MASK ((uint16_t)(0x8000))
@@ -125,85 +125,85 @@ static unsigned char umm_heap_stack[UMM_HEAP_STACK_DEPTH];
 
 #if (UMM_NUM_HEAPS == 1)
 size_t umm_get_current_heap_id(void) {
-  return 0;
+    return 0;
 }
 
 umm_heap_context_t *umm_get_current_heap(void) {
-  return &heap_context[0];
+    return &heap_context[0];
 }
 
-static umm_heap_context_t *umm_get_heap_by_id( size_t which ) {
-  (void)which;
-  return &heap_context[0];
+static umm_heap_context_t *umm_get_heap_by_id(size_t which) {
+    (void)which;
+    return &heap_context[0];
 }
 
-umm_heap_context_t *umm_set_heap_by_id( size_t which ) {
-  (void)which;
-  return &heap_context[0];
+umm_heap_context_t *umm_set_heap_by_id(size_t which) {
+    (void)which;
+    return &heap_context[0];
 }
 
 #else
 size_t umm_get_current_heap_id(void) {
-  return umm_heap_cur;
+    return umm_heap_cur;
 }
 
 umm_heap_context_t *umm_get_current_heap(void) {
-  return &heap_context[umm_heap_cur];
+    return &heap_context[umm_heap_cur];
 }
 
-static umm_heap_context_t *umm_get_heap_by_id( size_t which ) {
-  if (which < UMM_NUM_HEAPS) {
-    return &heap_context[which];
-  }
-  return NULL;
+static umm_heap_context_t *umm_get_heap_by_id(size_t which) {
+    if (which < UMM_NUM_HEAPS) {
+        return &heap_context[which];
+    }
+    return NULL;
 }
 
-umm_heap_context_t *umm_set_heap_by_id( size_t which ) {
-  umm_heap_context_t *_context = umm_get_heap_by_id(which);
-  if (_context && _context->heap) {
-    umm_heap_cur = which;
-    return _context;
-  }
-  return NULL;
+umm_heap_context_t *umm_set_heap_by_id(size_t which) {
+    umm_heap_context_t *_context = umm_get_heap_by_id(which);
+    if (_context && _context->heap) {
+        umm_heap_cur = which;
+        return _context;
+    }
+    return NULL;
 }
 #endif
 
 #if (UMM_NUM_HEAPS == 1)
-umm_heap_context_t *umm_push_heap( size_t which ) {
-  (void)which;
-  return &heap_context[0];
+umm_heap_context_t *umm_push_heap(size_t which) {
+    (void)which;
+    return &heap_context[0];
 }
 
-umm_heap_context_t *umm_pop_heap( void ) {
-  return &heap_context[0];
+umm_heap_context_t *umm_pop_heap(void) {
+    return &heap_context[0];
 }
 
-int umm_get_heap_stack_index( void ) {
-  return 0;
+int umm_get_heap_stack_index(void) {
+    return 0;
 }
 #else
 /* ------------------------------------------------------------------------ */
 
-umm_heap_context_t *umm_push_heap( size_t which ) {
-  if (umm_heap_stack_ptr < UMM_HEAP_STACK_DEPTH) {
-    umm_heap_stack[umm_heap_stack_ptr++] = umm_heap_cur;
-    return umm_set_heap_by_id( which );
-  }
-  return NULL;
+umm_heap_context_t *umm_push_heap(size_t which) {
+    if (umm_heap_stack_ptr < UMM_HEAP_STACK_DEPTH) {
+        umm_heap_stack[umm_heap_stack_ptr++] = umm_heap_cur;
+        return umm_set_heap_by_id(which);
+    }
+    return NULL;
 }
 
 /* ------------------------------------------------------------------------ */
 
-umm_heap_context_t *umm_pop_heap( void ) {
-  if (umm_heap_stack_ptr > 0 ) {
-    return umm_set_heap_by_id(umm_heap_stack[--umm_heap_stack_ptr]);
-  }
-  return NULL;
+umm_heap_context_t *umm_pop_heap(void) {
+    if (umm_heap_stack_ptr > 0) {
+        return umm_set_heap_by_id(umm_heap_stack[--umm_heap_stack_ptr]);
+    }
+    return NULL;
 }
 
-// Intended for diagnosic use
+// Intended for diagnostic use
-int umm_get_heap_stack_index( void ) {
-  return umm_heap_stack_ptr;
+int umm_get_heap_stack_index(void) {
+    return umm_heap_stack_ptr;
 }
 #endif
 /* ------------------------------------------------------------------------ */
@@ -212,22 +212,22 @@ int umm_get_heap_stack_index( void ) {
  * realloc or free since you may not be in the right heap to handle it.
  *
  */
-static bool test_ptr_context( size_t which, void *ptr ) {
-  return
-    heap_context[which].heap &&
-    ptr >= (void *)heap_context[which].heap &&
-    ptr <          heap_context[which].heap_end;
+static bool test_ptr_context(size_t which, void *ptr) {
+    return
+        heap_context[which].heap &&
+        ptr >= (void *)heap_context[which].heap &&
+        ptr < heap_context[which].heap_end;
 }
 
 static umm_heap_context_t *umm_get_ptr_context(void *ptr) {
-  for (size_t i = 0; i < UMM_NUM_HEAPS; i++) {
-    if (test_ptr_context( i, ptr ) ) {
-      return umm_get_heap_by_id( i );
+    for (size_t i = 0; i < UMM_NUM_HEAPS; i++) {
+        if (test_ptr_context(i, ptr)) {
+            return umm_get_heap_by_id(i);
+        }
     }
-  }
 
-  panic();
-  return NULL;
+    panic();
+    return NULL;
 }
 
 #define UMM_NUMBLOCKS (_context->numblocks)
@@ -343,26 +343,26 @@ static void umm_split_block(
     umm_heap_context_t *_context,
     uint16_t c,
     uint16_t blocks,
-    uint16_t new_freemask ) {
+    uint16_t new_freemask) {
 
-  UMM_NBLOCK(c+blocks) = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) | new_freemask;
-  UMM_PBLOCK(c+blocks) = c;
+    UMM_NBLOCK(c + blocks) = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) | new_freemask;
+    UMM_PBLOCK(c + blocks) = c;
 
-  UMM_PBLOCK(UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) = (c+blocks);
-  UMM_NBLOCK(c)                                = (c+blocks);
+    UMM_PBLOCK(UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) = (c + blocks);
+    UMM_NBLOCK(c) = (c + blocks);
 }
 
 /* ------------------------------------------------------------------------ */
 
-static void umm_disconnect_from_free_list( umm_heap_context_t *_context, uint16_t c ) {
-  /* Disconnect this block from the FREE list */
+static void umm_disconnect_from_free_list(umm_heap_context_t *_context, uint16_t c) {
+    /* Disconnect this block from the FREE list */
 
-  UMM_NFREE(UMM_PFREE(c)) = UMM_NFREE(c);
-  UMM_PFREE(UMM_NFREE(c)) = UMM_PFREE(c);
+    UMM_NFREE(UMM_PFREE(c)) = UMM_NFREE(c);
+    UMM_PFREE(UMM_NFREE(c)) = UMM_PFREE(c);
 
-  /* And clear the free block indicator */
+    /* And clear the free block indicator */
 
-  UMM_NBLOCK(c) &= (~UMM_FREELIST_MASK);
+    UMM_NBLOCK(c) &= (~UMM_FREELIST_MASK);
 }
 
 /* ------------------------------------------------------------------------
@@ -371,28 +371,28 @@ static void umm_disconnect_from_free_list( umm_heap_context_t *_context, uint16_
  * next block is free.
  */
 
-static void umm_assimilate_up( umm_heap_context_t *_context, uint16_t c ) {
+static void umm_assimilate_up(umm_heap_context_t *_context, uint16_t c) {
 
-  if( UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK ) {
+    if (UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK) {
 
-    UMM_FRAGMENTATION_METRIC_REMOVE( UMM_NBLOCK(c) );
+        UMM_FRAGMENTATION_METRIC_REMOVE(UMM_NBLOCK(c));
 
-    /*
-     * The next block is a free block, so assimilate up and remove it from
-     * the free list
-     */
+        /*
+         * The next block is a free block, so assimilate up and remove it from
+         * the free list
+         */
 
-    DBGLOG_DEBUG( "Assimilate up to next block, which is FREE\n" );
+        DBGLOG_DEBUG("Assimilate up to next block, which is FREE\n");
 
-    /* Disconnect the next block from the FREE list */
+        /* Disconnect the next block from the FREE list */
 
-    umm_disconnect_from_free_list( _context, UMM_NBLOCK(c) );
+        umm_disconnect_from_free_list(_context, UMM_NBLOCK(c));
 
-    /* Assimilate the next block with this one */
+        /* Assimilate the next block with this one */
 
-    UMM_PBLOCK(UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK) = c;
-    UMM_NBLOCK(c) = UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK;
-  }
+        UMM_PBLOCK(UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK) = c;
+        UMM_NBLOCK(c) = UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK;
+    }
 }
 
 /* ------------------------------------------------------------------------
@@ -401,50 +401,50 @@ static void umm_assimilate_up( umm_heap_context_t *_context, uint16_t c ) {
  * up before assimilating down.
  */
 
-static uint16_t umm_assimilate_down( umm_heap_context_t *_context, uint16_t c, uint16_t freemask ) {
+static uint16_t umm_assimilate_down(umm_heap_context_t *_context, uint16_t c, uint16_t freemask) {
 
-  // We are going to assimilate down to the previous block because
-  // it was free, so remove it from the fragmentation metric
+    // We are going to assimilate down to the previous block because
+    // it was free, so remove it from the fragmentation metric
 
-  UMM_FRAGMENTATION_METRIC_REMOVE(UMM_PBLOCK(c));
+    UMM_FRAGMENTATION_METRIC_REMOVE(UMM_PBLOCK(c));
 
-  UMM_NBLOCK(UMM_PBLOCK(c)) = UMM_NBLOCK(c) | freemask;
-  UMM_PBLOCK(UMM_NBLOCK(c)) = UMM_PBLOCK(c);
+    UMM_NBLOCK(UMM_PBLOCK(c)) = UMM_NBLOCK(c) | freemask;
+    UMM_PBLOCK(UMM_NBLOCK(c)) = UMM_PBLOCK(c);
 
-  if (freemask) {
-      // We are going to free the entire assimilated block
-      // so add it to the fragmentation metric. A good
-      // compiler will optimize away the empty if statement
-      // when UMM_INFO is not defined, so don't worry about
-      // guarding it.
+    if (freemask) {
+        // We are going to free the entire assimilated block
+        // so add it to the fragmentation metric. A good
+        // compiler will optimize away the empty if statement
+        // when UMM_INFO is not defined, so don't worry about
+        // guarding it.
 
-      UMM_FRAGMENTATION_METRIC_ADD(UMM_PBLOCK(c));
-  }
+        UMM_FRAGMENTATION_METRIC_ADD(UMM_PBLOCK(c));
+    }
 
-  return( UMM_PBLOCK(c) );
+    return UMM_PBLOCK(c);
 }
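
umm_malloc threads two doubly linked structures through the block headers: the physical block list (UMM_NBLOCK/UMM_PBLOCK), which the two assimilate helpers above splice, and the free list (UMM_NFREE/UMM_PFREE). As a reading aid, here is a minimal, self-contained model of the umm_assimilate_up() splice using flat arrays; the layout and the elided free-list disconnect are simplifications, not this port's actual code.

```
/* Illustrative model only: the high bit of `next` marks a free block. */
#include <stdint.h>
#include <stdio.h>

#define FREELIST_MASK 0x8000u
#define BLOCKNO_MASK  0x7FFFu

static uint16_t nblock[8];   /* next physical block, plus the free flag */
static uint16_t pblock[8];   /* previous physical block */

/* Merge block c with its next neighbor when that neighbor is free,
 * mirroring umm_assimilate_up() minus the free-list disconnect. */
static void model_assimilate_up(uint16_t c) {
    if (nblock[nblock[c]] & FREELIST_MASK) {
        pblock[nblock[nblock[c]] & BLOCKNO_MASK] = c;
        nblock[c] = nblock[nblock[c]] & BLOCKNO_MASK;
    }
}

int main(void) {
    /* Physical chain 1 -> 3 -> 5, with block 3 marked free. */
    nblock[1] = 3;
    pblock[3] = 1;
    nblock[3] = 5 | FREELIST_MASK;
    pblock[5] = 3;
    model_assimilate_up(1);
    printf("block 1 now spans to %u\n", (unsigned)nblock[1]);  /* 5 */
    return 0;
}
```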
 
 /* ------------------------------------------------------------------------- */
 
-static void umm_init_stage_2( umm_heap_context_t *_context ) {
-  /* setup initial blank heap structure */
+static void umm_init_stage_2(umm_heap_context_t *_context) {
+    /* setup initial blank heap structure */
     UMM_FRAGMENTATION_METRIC_INIT();
 
     /* init stats.free_blocks */
-#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
-#if defined(UMM_STATS_FULL)
+    #if defined(UMM_STATS) || defined(UMM_STATS_FULL)
+    #if defined(UMM_STATS_FULL)
     _context->stats.free_blocks_min =
-    _context->stats.free_blocks_isr_min  = UMM_NUMBLOCKS - 2;
-#endif
-#ifndef UMM_INLINE_METRICS
+        _context->stats.free_blocks_isr_min = UMM_NUMBLOCKS - 2;
+    #endif
+    #ifndef UMM_INLINE_METRICS
     _context->stats.free_blocks = UMM_NUMBLOCKS - 2;
-#endif
-#endif
+    #endif
+    #endif
 
     /* Set up umm_block[0], which just points to umm_block[1] */
     UMM_NBLOCK(0) = 1;
-    UMM_NFREE(0)  = 1;
-    UMM_PFREE(0)  = 1;
+    UMM_NFREE(0) = 1;
+    UMM_PFREE(0) = 1;
 
     /*
      * Now, we need to set the whole heap space as a huge free block. We should
@@ -479,50 +479,50 @@ static void umm_init_stage_2( umm_heap_context_t *_context ) {
 }
 
 
-void umm_init_common( size_t id, void *start_addr, size_t size, bool zero ) {
-  /* Preserve internal setup */
-  umm_heap_context_t *_context = umm_get_heap_by_id(id);
-  if (NULL == start_addr || NULL == _context || _context->heap) {
-    return;
-  }
-
-  /* init heap pointer and size, and memset it to 0 */
-  _context->id        = id;
-  _context->heap      = (umm_block *)start_addr;
-  _context->heap_end  = (void *)((uintptr_t)start_addr + size);
-  _context->numblocks = (size / sizeof(umm_block));
-
-  // An option for blocking the zeroing of extra heaps allows for performing
-  // post-crash discovery.
-  if (zero) {
-  	memset(_context->heap, 0x00, size);
-#if (!defined(UMM_INLINE_METRICS) && defined(UMM_STATS)) || defined(UMM_STATS_FULL)
-    memset(&_context->stats, 0x00, sizeof(_context->stats));
-#endif
+void umm_init_common(size_t id, void *start_addr, size_t size, bool zero) {
+    /* Preserve internal setup */
+    umm_heap_context_t *_context = umm_get_heap_by_id(id);
+    if (NULL == start_addr || NULL == _context || _context->heap) {
+        return;
+    }
 
-    /* Set up internal data structures */
-    umm_init_stage_2(_context);
-  }
+    /* init heap pointer and size, and memset it to 0 */
+    _context->id = id;
+    _context->heap = (umm_block *)start_addr;
+    _context->heap_end = (void *)((uintptr_t)start_addr + size);
+    _context->numblocks = (size / sizeof(umm_block));
+
+    // An option to skip zeroing the extra heaps allows for post-crash
+    // discovery.
+    if (zero) {
+        memset(_context->heap, 0x00, size);
+        #if (!defined(UMM_INLINE_METRICS) && defined(UMM_STATS)) || defined(UMM_STATS_FULL)
+        memset(&_context->stats, 0x00, sizeof(_context->stats));
+        #endif
+
+        /* Set up internal data structures */
+        umm_init_stage_2(_context);
+    }
 }
 
-void umm_init( void ) {
-  // if (umm_heap) {
-  //   return;
-  // }
-  for (size_t i = 0; i < UMM_NUM_HEAPS; i++) {
-    heap_context[i].heap = NULL;
-  }
-  memset(&heap_context[0], 0, sizeof(heap_context));
-  umm_init_common( UMM_HEAP_DRAM, (void *)UMM_MALLOC_CFG_HEAP_ADDR, UMM_MALLOC_CFG_HEAP_SIZE, true );
-  // umm_heap = (void *)&heap_context;
+void umm_init(void) {
+    // if (umm_heap) {
+    //   return;
+    // }
+    for (size_t i = 0; i < UMM_NUM_HEAPS; i++) {
+        heap_context[i].heap = NULL;
+    }
+    memset(&heap_context[0], 0, sizeof(heap_context));
+    umm_init_common(UMM_HEAP_DRAM, (void *)UMM_MALLOC_CFG_HEAP_ADDR, UMM_MALLOC_CFG_HEAP_SIZE, true);
+    // umm_heap = (void *)&heap_context;
 }
 
 #ifdef UMM_HEAP_IRAM
-void umm_init_iram_ex( void *addr, unsigned int size, bool zero ) {
-  /* We need the main, internal heap set up first */
-  UMM_INIT_HEAP;
+void umm_init_iram_ex(void *addr, unsigned int size, bool zero) {
+    /* We need the main, internal heap set up first */
+    UMM_INIT_HEAP;
 
-  umm_init_common(UMM_HEAP_IRAM, addr, size, zero);
+    umm_init_common(UMM_HEAP_IRAM, addr, size, zero);
 }
 
 void _text_end(void);
@@ -534,16 +534,16 @@ void umm_init_iram(void) __attribute__((weak));
   dedicated to a sketch and possibly used/preserved across reboots.
  */
 void umm_init_iram(void) {
-  umm_init_iram_ex(mmu_sec_heap(), mmu_sec_heap_size(), true);
+    umm_init_iram_ex(mmu_sec_heap(), mmu_sec_heap_size(), true);
 }
-#endif	// #ifdef UMM_HEAP_IRAM
+#endif  // #ifdef UMM_HEAP_IRAM
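
Because umm_init_iram() is weak, the post-crash-discovery option noted in umm_init_common() can be exercised by overriding it and passing zero == false, which leaves the IRAM region untouched at boot. Note from the code above that zero == false also skips umm_init_stage_2(), so the preserved region is only for inspection until it is reinitialized. A sketch, with the include paths assumed; umm_init_iram_ex(), mmu_sec_heap(), and mmu_sec_heap_size() are from this port:

```
/* Hedged sketch: keep the IRAM heap contents across a reboot for inspection. */
#include <umm_malloc/umm_malloc.h>
#include <mmu_iram.h>   /* assumed home of mmu_sec_heap()/mmu_sec_heap_size() */

void umm_init_iram(void) {   /* overrides the weak default above */
    umm_init_iram_ex(mmu_sec_heap(), mmu_sec_heap_size(), false);
}
```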
 
 #ifdef UMM_HEAP_EXTERNAL
-void umm_init_vm( void *vmaddr, unsigned int vmsize ) {
-  /* We need the main, internal (DRAM) heap set up first */
-  UMM_INIT_HEAP;
+void umm_init_vm(void *vmaddr, unsigned int vmsize) {
+    /* We need the main, internal (DRAM) heap set up first */
+    UMM_INIT_HEAP;
 
-  umm_init_common(UMM_HEAP_EXTERNAL, vmaddr, vmsize, true);
+    umm_init_common(UMM_HEAP_EXTERNAL, vmaddr, vmsize, true);
 }
 #endif
 
@@ -552,87 +552,87 @@ void umm_init_vm( void *vmaddr, unsigned int vmsize ) {
  * UMM_CRITICAL_ENTRY() and UMM_CRITICAL_EXIT().
  */
 
-static void umm_free_core( umm_heap_context_t *_context, void *ptr ) {
+static void umm_free_core(umm_heap_context_t *_context, void *ptr) {
 
-  uint16_t c;
+    uint16_t c;
 
-  if (NULL == _context) {
-    panic();
-    return;
-  }
+    if (NULL == _context) {
+        panic();
+        return;
+    }
 
-  STATS__FREE_REQUEST(id_free);
-  /*
-   * FIXME: At some point it might be a good idea to add a check to make sure
-   *        that the pointer we're being asked to free up is actually within
-   *        the umm_heap!
-   *
-   * NOTE:  See the new umm_info() function that you can use to see if a ptr is
-   *        on the free list!
-   */
+    STATS__FREE_REQUEST(id_free);
+    /*
+     * FIXME: At some point it might be a good idea to add a check to make sure
+     *        that the pointer we're being asked to free up is actually within
+     *        the umm_heap!
+     *
+     * NOTE:  See the new umm_info() function that you can use to see if a ptr is
+     *        on the free list!
+     */
 
-  /* Figure out which block we're in. Note the use of truncated division... */
+    /* Figure out which block we're in. Note the use of truncated division... */
 
-  c = (((uintptr_t)ptr)-(uintptr_t)(&(_context->heap[0])))/sizeof(umm_block);
+    c = (((uintptr_t)ptr) - (uintptr_t)(&(_context->heap[0]))) / sizeof(umm_block);
 
-  DBGLOG_DEBUG( "Freeing block %6d\n", c );
+    DBGLOG_DEBUG("Freeing block %6d\n", c);
 
-  /* Update stats Free Block count */
-  STATS__FREE_BLOCKS_UPDATE(UMM_NBLOCK(c) - c);
+    /* Update stats Free Block count */
+    STATS__FREE_BLOCKS_UPDATE(UMM_NBLOCK(c) - c);
 
-  /* Now let's assimilate this block with the next one if possible. */
+    /* Now let's assimilate this block with the next one if possible. */
 
-  umm_assimilate_up( _context, c );
+    umm_assimilate_up(_context, c);
 
-  /* Then assimilate with the previous block if possible */
+    /* Then assimilate with the previous block if possible */
 
-  if( UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK ) {
+    if (UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK) {
 
-    DBGLOG_DEBUG( "Assimilate down to previous block, which is FREE\n" );
+        DBGLOG_DEBUG("Assimilate down to previous block, which is FREE\n");
 
-    c = umm_assimilate_down(_context, c, UMM_FREELIST_MASK);
-  } else {
-    /*
-     * The previous block is not a free block, so add this one to the head
-     * of the free list
-     */
-    UMM_FRAGMENTATION_METRIC_ADD(c);
+        c = umm_assimilate_down(_context, c, UMM_FREELIST_MASK);
+    } else {
+        /*
+         * The previous block is not a free block, so add this one to the head
+         * of the free list
+         */
+        UMM_FRAGMENTATION_METRIC_ADD(c);
 
-    DBGLOG_DEBUG( "Just add to head of free list\n" );
+        DBGLOG_DEBUG("Just add to head of free list\n");
 
-    UMM_PFREE(UMM_NFREE(0)) = c;
-    UMM_NFREE(c)            = UMM_NFREE(0);
-    UMM_PFREE(c)            = 0;
-    UMM_NFREE(0)            = c;
+        UMM_PFREE(UMM_NFREE(0)) = c;
+        UMM_NFREE(c) = UMM_NFREE(0);
+        UMM_PFREE(c)            = 0;
+        UMM_NFREE(0) = c;
 
-    UMM_NBLOCK(c)          |= UMM_FREELIST_MASK;
-  }
+        UMM_NBLOCK(c) |= UMM_FREELIST_MASK;
+    }
 }
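
The else branch above links the freed block into the head of the free list, whose sentinel is block 0 (wired up in umm_init_stage_2()). A minimal model of that four-assignment insertion, assuming flat index arrays and a sentinel that starts out pointing at itself (the real sentinel starts out pointing at the single large free block):

```
/* Illustrative model only: nfree/pfree are next/prev free-block indices. */
#include <stdint.h>
#include <stdio.h>

static uint16_t nfree[8];
static uint16_t pfree[8];

static void free_list_push_head(uint16_t c) {
    pfree[nfree[0]] = c;    /* old head's prev now points at the new block */
    nfree[c] = nfree[0];    /* new block's next is the old head */
    pfree[c] = 0;           /* new block's prev is the sentinel */
    nfree[0] = c;           /* sentinel's next is the new head */
}

int main(void) {
    free_list_push_head(4);
    free_list_push_head(2);
    /* Walk the list: prints blocks 2 then 4, terminating at sentinel 0. */
    for (uint16_t c = nfree[0]; c != 0; c = nfree[c]) {
        printf("free block %u\n", (unsigned)c);
    }
    return 0;
}
```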
 
 /* ------------------------------------------------------------------------ */
 
-void umm_free( void *ptr ) {
-  UMM_CRITICAL_DECL(id_free);
+void umm_free(void *ptr) {
+    UMM_CRITICAL_DECL(id_free);
 
-  UMM_INIT_HEAP;
+    UMM_INIT_HEAP;
 
-  /* If we're being asked to free a NULL pointer, well that's just silly! */
+    /* If we're being asked to free a NULL pointer, well that's just silly! */
 
-  if( (void *)0 == ptr ) {
-    DBGLOG_DEBUG( "free a null pointer -> do nothing\n" );
-    STATS__NULL_FREE_REQUEST(id_free);
+    if ((void *)0 == ptr) {
+        DBGLOG_DEBUG("free a null pointer -> do nothing\n");
+        STATS__NULL_FREE_REQUEST(id_free);
 
-    return;
-  }
+        return;
+    }
 
-  /* Free the memory within a protected critical section */
+    /* Free the memory within a protected critical section */
 
-  UMM_CRITICAL_ENTRY(id_free);
+    UMM_CRITICAL_ENTRY(id_free);
 
-  /* Need to be in the heap in which this block lives */
-  umm_free_core( umm_get_ptr_context( ptr ), ptr );
+    /* Need to be in the heap in which this block lives */
+    umm_free_core(umm_get_ptr_context(ptr), ptr);
 
-  UMM_CRITICAL_EXIT(id_free);
+    UMM_CRITICAL_EXIT(id_free);
 }
 
 /* ------------------------------------------------------------------------
@@ -640,328 +640,329 @@ void umm_free( void *ptr ) {
  * UMM_CRITICAL_ENTRY() and UMM_CRITICAL_EXIT().
  */
 
-static void *umm_malloc_core( umm_heap_context_t *_context, size_t size ) {
-  uint16_t blocks;
-  uint16_t blockSize = 0;
+static void *umm_malloc_core(umm_heap_context_t *_context, size_t size) {
+    uint16_t blocks;
+    uint16_t blockSize = 0;
 
-  uint16_t bestSize;
-  uint16_t bestBlock;
+    uint16_t bestSize;
+    uint16_t bestBlock;
 
-  uint16_t cf;
+    uint16_t cf;
 
-  STATS__ALLOC_REQUEST(id_malloc, size);
+    STATS__ALLOC_REQUEST(id_malloc, size);
 
-  if (NULL == _context) {
-    panic();
-    return NULL;
-  }
+    if (NULL == _context) {
+        panic();
+        return NULL;
+    }
 
-  blocks = umm_blocks( size );
+    blocks = umm_blocks(size);
 
-  /*
-   * Now we can scan through the free list until we find a space that's big
-   * enough to hold the number of blocks we need.
-   *
-   * This part may be customized to be a best-fit, worst-fit, or first-fit
-   * algorithm
-   */
+    /*
+     * Now we can scan through the free list until we find a space that's big
+     * enough to hold the number of blocks we need.
+     *
+     * This part may be customized to be a best-fit, worst-fit, or first-fit
+     * algorithm
+     */
 
-  cf = UMM_NFREE(0);
+    cf = UMM_NFREE(0);
 
-  bestBlock = UMM_NFREE(0);
-  bestSize  = 0x7FFF;
+    bestBlock = UMM_NFREE(0);
+    bestSize = 0x7FFF;
 
-  while( cf ) {
-    blockSize = (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK) - cf;
+    while (cf) {
+        blockSize = (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK) - cf;
 
-    DBGLOG_TRACE( "Looking at block %6d size %6d\n", cf, blockSize );
+        DBGLOG_TRACE("Looking at block %6d size %6d\n", cf, blockSize);
 
-#if defined UMM_BEST_FIT
-    if( (blockSize >= blocks) && (blockSize < bestSize) ) {
-      bestBlock = cf;
-      bestSize  = blockSize;
+        #if defined UMM_BEST_FIT
+        if ((blockSize >= blocks) && (blockSize < bestSize)) {
+            bestBlock = cf;
+            bestSize = blockSize;
+        }
+        #elif defined UMM_FIRST_FIT
+        /* This is the first block that fits! */
+        if ((blockSize >= blocks)) {
+            break;
+        }
+        #else
+        #error "No UMM_*_FIT is defined - check umm_malloc_cfg.h"
+        #endif
+
+        cf = UMM_NFREE(cf);
     }
-#elif defined UMM_FIRST_FIT
-    /* This is the first block that fits! */
-    if( (blockSize >= blocks) )
-      break;
-#else
-#  error "No UMM_*_FIT is defined - check umm_malloc_cfg.h"
-#endif
 
-    cf = UMM_NFREE(cf);
-  }
+    if (0x7FFF != bestSize) {
+        cf = bestBlock;
+        blockSize = bestSize;
+    }
 
-  if( 0x7FFF != bestSize ) {
-    cf        = bestBlock;
-    blockSize = bestSize;
-  }
+    POISON_CHECK_NEIGHBORS(cf);
 
-  POISON_CHECK_NEIGHBORS(cf);
+    if (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK && blockSize >= blocks) {
 
-  if( UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK && blockSize >= blocks ) {
+        UMM_FRAGMENTATION_METRIC_REMOVE(cf);
 
-    UMM_FRAGMENTATION_METRIC_REMOVE(cf);
+        /*
+         * This is an existing block in the memory heap, we just need to split off
+         * what we need, unlink it from the free list and mark it as in use, and
+         * link the rest of the block back into the freelist as if it was a new
+         * block on the free list...
+         */
 
-    /*
-     * This is an existing block in the memory heap, we just need to split off
-     * what we need, unlink it from the free list and mark it as in use, and
-     * link the rest of the block back into the freelist as if it was a new
-     * block on the free list...
-     */
+        if (blockSize == blocks) {
+            /* It's an exact fit and we don't need to split off a block. */
+            DBGLOG_DEBUG("Allocating %6d blocks starting at %6d - exact\n", blocks, cf);
+
+            /* Disconnect this block from the FREE list */
+
+            umm_disconnect_from_free_list(_context, cf);
+
+        } else {
 
-    if( blockSize == blocks ) {
-      /* It's an exact fit and we don't need to split off a block. */
-      DBGLOG_DEBUG( "Allocating %6d blocks starting at %6d - exact\n", blocks, cf );
+            /* It's not an exact fit and we need to split off a block. */
+            DBGLOG_DEBUG("Allocating %6d blocks starting at %6d - existing\n", blocks, cf);
 
-      /* Disconnect this block from the FREE list */
+            /*
+             * split current free block `cf` into two blocks. The first one will be
+             * returned to user, so it's not free, and the second one will be free.
+             */
+            umm_split_block(_context, cf, blocks, UMM_FREELIST_MASK /*new block is free*/);
 
-      umm_disconnect_from_free_list( _context, cf );
+            UMM_FRAGMENTATION_METRIC_ADD(UMM_NBLOCK(cf));
 
+            /*
+             * `umm_split_block()` does not update the free pointers (it affects
+             * only free flags), but effectively we've just moved beginning of the
+             * free block from `cf` to `cf + blocks`. So we have to adjust pointers
+             * to and from adjacent free blocks.
+             */
+
+            /* previous free block */
+            UMM_NFREE(UMM_PFREE(cf)) = cf + blocks;
+            UMM_PFREE(cf + blocks) = UMM_PFREE(cf);
+
+            /* next free block */
+            UMM_PFREE(UMM_NFREE(cf)) = cf + blocks;
+            UMM_NFREE(cf + blocks) = UMM_NFREE(cf);
+        }
+
+        STATS__FREE_BLOCKS_UPDATE(-blocks);
+        STATS__FREE_BLOCKS_MIN();
     } else {
+        /* Out of memory */
+        STATS__OOM_UPDATE();
 
-      /* It's not an exact fit and we need to split off a block. */
-      DBGLOG_DEBUG( "Allocating %6d blocks starting at %6d - existing\n", blocks, cf );
+        DBGLOG_DEBUG("Can't allocate %5d blocks\n", blocks);
 
-      /*
-       * split current free block `cf` into two blocks. The first one will be
-       * returned to user, so it's not free, and the second one will be free.
-       */
-      umm_split_block( _context, cf, blocks, UMM_FREELIST_MASK /*new block is free*/ );
+        return (void *)NULL;
+    }
 
-      UMM_FRAGMENTATION_METRIC_ADD(UMM_NBLOCK(cf));
+    return (void *)&UMM_DATA(cf);
+}
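
As the comment in umm_malloc_core() says, the scan can be best-fit or first-fit, chosen at compile time. The toy below contrasts the two policies over a flat list of (block, size) pairs; it is illustrative only and does not use the real UMM_NFREE chain:

```
/* Illustrative comparison of the two fit policies in umm_malloc_core(). */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint16_t block; uint16_t size; } free_chunk;

static uint16_t pick_best_fit(const free_chunk *fl, int n, uint16_t blocks) {
    uint16_t bestBlock = 0, bestSize = 0x7FFF;
    for (int i = 0; i < n; i++) {
        if (fl[i].size >= blocks && fl[i].size < bestSize) {
            bestBlock = fl[i].block;
            bestSize = fl[i].size;
        }
    }
    return (bestSize != 0x7FFF) ? bestBlock : 0;   /* 0 => no fit found */
}

static uint16_t pick_first_fit(const free_chunk *fl, int n, uint16_t blocks) {
    for (int i = 0; i < n; i++) {
        if (fl[i].size >= blocks) {
            return fl[i].block;
        }
    }
    return 0;
}

int main(void) {
    const free_chunk fl[] = { { 10, 8 }, { 30, 3 }, { 50, 12 } };
    /* For 3 blocks: first-fit takes block 10 (size 8, must split), while
     * best-fit takes block 30 (size 3, exact, no leftover fragment). */
    printf("first-fit: %u, best-fit: %u\n",
        (unsigned)pick_first_fit(fl, 3, 3), (unsigned)pick_best_fit(fl, 3, 3));
    return 0;
}
```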
 
-      /*
-       * `umm_split_block()` does not update the free pointers (it affects
-       * only free flags), but effectively we've just moved beginning of the
-       * free block from `cf` to `cf + blocks`. So we have to adjust pointers
-       * to and from adjacent free blocks.
-       */
+/* ------------------------------------------------------------------------ */
 
-      /* previous free block */
-      UMM_NFREE( UMM_PFREE(cf) ) = cf + blocks;
-      UMM_PFREE( cf + blocks ) = UMM_PFREE(cf);
+void *umm_malloc(size_t size) {
+    UMM_CRITICAL_DECL(id_malloc);
 
-      /* next free block */
-      UMM_PFREE( UMM_NFREE(cf) ) = cf + blocks;
-      UMM_NFREE( cf + blocks ) = UMM_NFREE(cf);
+    void *ptr = NULL;
+
+    UMM_INIT_HEAP;
+
+    /*
+     * "Is it safe"
+     *
+     * Is it safe to call from an ISR? Is there a point during a malloc where
+     * an interrupt and a subsequent call to malloc could produce undesired results?
+     *
+     * Heap selection is managed by the functions umm_push_heap, umm_pop_heap,
+     * umm_get_current_heap_id, and umm_set_heap_by_id. These functions are
+     * responsible for getting/setting the module static variable umm_heap_cur.
+     * The umm_heap_cur variable is an index that is used to select the current
+     * heap context. Depending on the situation this selection can be overridden.
+     *
+     * All variables for a specific Heap are in a single structure. `heap_context`
+     * is an array of these structures. Each heap API function uses a function
+     * local variable `_context` to hold a pointer to the selected heap structure.
+     * This local pointer is referenced for all the "selected heap" operations.
+     * This, coupled with critical sections around global data, should allow
+     * the API functions to be reentrant.
+     *
+     * Using the `_context` name throughout made it easy to incorporate the
+     * context into existing macros.
+     *
+     * For allocating APIs `umm_heap_cur` is used to index and select a value for
+     * `_context`. If an allocation is made from an ISR, this value is ignored and
+     * the heap context for DRAM is loaded. For APIs that require operating on an
+     * existing allocation, such as realloc and free, the heap context is
+     * selected by matching the allocation's address against one of the heap
+     * address ranges.
+     *
+     * I think we are safe with multiple heaps when the non32-bit exception
+     * handler is used, as long as interrupts don't get enabled. There was a
+     * window in the Boot ROM "C" Exception Wrapper that would enable interrupts
+     * when running our non32-exception handler; however, that should be resolved
+     * by our replacement wrapper. For more information on exception handling
+     * issues for IRAM see comments above `_set_exception_handler_wrapper()` in
+     * `core_esp8266_non32xfer.cpp`.
+     *
+     * ISRs should not try to change heaps. umm_malloc will ignore the change.
+     * All should be fine as long as the caller puts the heap back the way it was.
+     * On return, everything must be the same. The foreground thread will continue
+     * with the same information that was there before the interrupt. All malloc()
+     * requests made from an ISR are fulfilled with DRAM.
+     *
+     * For umm_malloc, heap selection involves changing a single variable that is
+     * on the calling context's stack. From the umm_malloc side, that variable is
+     * used to load a context pointer by index (the heap ID). While an umm_malloc API
+     * function is running, all heap related variables are in the context variable
+     * pointer, registers, or the current stack as the request is processed. With
+     * a single variable to reference for heap selection, I think it is unlikely
+     * that umm_malloc can be called with things in an unusable transition state.
+     */
+
+    umm_heap_context_t *_context = umm_get_current_heap();
+
+    /*
+     * the very first thing we do is figure out if we're being asked to allocate
+     * a size of 0 - and if we are we'll simply return a null pointer. if not
+     * then reduce the size by 1 byte so that the subsequent calculations on
+     * the number of blocks to allocate are easier...
+     */
+
+    if (0 == size) {
+        DBGLOG_DEBUG("malloc a block of 0 bytes -> do nothing\n");
+        STATS__ZERO_ALLOC_REQUEST(id_malloc, size);
+
+        return ptr;
     }
 
-    STATS__FREE_BLOCKS_UPDATE( -blocks );
-    STATS__FREE_BLOCKS_MIN();
-  } else {
-    /* Out of memory */
-    STATS__OOM_UPDATE();
+    /* Allocate the memory within a protected critical section */
 
-    DBGLOG_DEBUG(  "Can't allocate %5d blocks\n", blocks );
+    UMM_CRITICAL_ENTRY(id_malloc);
 
-    return( (void *)NULL );
-  }
+    /*
+     * We handle the realloc of an existing IRAM allocation from an ISR with IRAM,
+     * while a new malloc from an ISR will always supply DRAM. That said, realloc
+     * from an ISR is not generally safe without special locking mechanisms and is
+     * not formally supported.
+     *
+     * Additionally, to avoid extending the IRQs disabled period, it is best to
+     * use DRAM for an ISR. Each 16-bit access to IRAM that umm_malloc has to make
+     * requires a pass through the exception handling logic.
+     */
+    if (UMM_CRITICAL_WITHINISR(id_malloc)) {
+        _context = umm_get_heap_by_id(UMM_HEAP_DRAM);
+    }
 
-  return( (void *)&UMM_DATA(cf) );
-}
+    ptr = umm_malloc_core(_context, size);
 
-/* ------------------------------------------------------------------------ */
+    UMM_CRITICAL_EXIT(id_malloc);
 
-void *umm_malloc( size_t size ) {
-  UMM_CRITICAL_DECL(id_malloc);
-
-  void *ptr = NULL;
-
-  UMM_INIT_HEAP;
-
-  /*
-   * "Is it safe"
-   *
-   * Is it safe to call from an ISR? Is there a point during a malloc that a
-   * an interrupt and subsequent call to malloc result in undesired results?
-   *
-   * Heap selection in managed by the functions umm_push_heap, umm_pop_heap,
-   * umm_get_current_heap_id, and umm_set_heap_by_id. These functions are
-   * responsible for getting/setting the module static variable umm_heap_cur.
-   * The umm_heap_cur variable is an index that is used to select the current
-   * heap context. Depending on the situation this selection can be overriddened.
-   *
-   * All variables for a specific Heap are in a single structure. `heap_context`
-   * is an array of these structures. Each heap API function uses a function
-   * local variable `_context` to hold a pointer to the selected heap structure.
-   * This local pointer is referenced for all the "selected heap" operations.
-   * Coupled with critical sections around global data should allow the API
-   * functions to be reentrant.
-   *
-   * Using the `_context` name throughout made it easy to incorporate the
-   * context into existing macros.
-   *
-   * For allocating APIs `umm_heap_cur` is used to index and select a value for
-   * `_context`. If an allocation is made from an ISR, this value is ignored and
-   * the heap context for DRAM is loaded. For APIs that require operating on an
-   * existing allocation such as realloc and free, the heap context selected is
-   * done by matching the allocation's address with that of one of the heap
-   * address ranges.
-   *
-   * I think we are safe with multiple heaps when the non32-bit exception
-   * handler is used, as long as interrupts don't get enabled. There was a
-   * window in the Boot ROM "C" Exception Wrapper that would enable interrupts
-   * when running our non32-exception handler; however, that should be resolved
-   * by our replacement wrapper. For more information on exception handling
-   * issues for IRAM see comments above `_set_exception_handler_wrapper()` in
-   * `core_esp8266_non32xfer.cpp`.
-   *
-   * ISRs should not try and change heaps. umm_malloc will ignore the change.
-   * All should be fine as long as the caller puts the heap back the way it was.
-   * On return, everything must be the same. The foreground thread will continue
-   * with the same information that was there before the interrupt. All malloc()
-   * requests made from an ISR are fulfilled with DRAM.
-   *
-   * For umm_malloc, heap selection involves changing a single variable that is
-   * on the calling context stack. From the umm_mallac side, that variable is
-   * used to load a context pointer by index, heap ID. While an umm_malloc API
-   * function is running, all heap related variables are in the context variable
-   * pointer, registers, or the current stack as the request is processed. With
-   * a single variable to reference for heap selection, I think it is unlikely
-   * that umm_malloc can be called, with things in an unusable transition state.
-   */
-
-  umm_heap_context_t *_context = umm_get_current_heap();
-
-  /*
-   * the very first thing we do is figure out if we're being asked to allocate
-   * a size of 0 - and if we are we'll simply return a null pointer. if not
-   * then reduce the size by 1 byte so that the subsequent calculations on
-   * the number of blocks to allocate are easier...
-   */
-
-  if( 0 == size ) {
-    DBGLOG_DEBUG( "malloc a block of 0 bytes -> do nothing\n" );
-    STATS__ZERO_ALLOC_REQUEST(id_malloc, size);
-
-    return( ptr );
-  }
-
-  /* Allocate the memory within a protected critical section */
-
-  UMM_CRITICAL_ENTRY(id_malloc);
-
-  /*
-   * We handle the realloc of an existing IRAM allocation from an ISR with IRAM,
-   * while a new malloc from an ISR will always supply DRAM. That said, realloc
-   * from an ISR is not generally safe without special locking mechanisms and is
-   * not formally supported.
-   *
-   * Additionally, to avoid extending the IRQs disabled period, it is best to
-   * use DRAM for an ISR. Each 16-bit access to IRAM that umm_malloc has to make
-   * requires a pass through the exception handling logic.
-   */
-  if (UMM_CRITICAL_WITHINISR(id_malloc)) {
-    _context = umm_get_heap_by_id(UMM_HEAP_DRAM);
-  }
-
-  ptr = umm_malloc_core( _context, size );
-
-  UMM_CRITICAL_EXIT(id_malloc);
-
-  return( ptr );
+    return ptr;
 }
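
A short usage sketch of the heap-selection API described in the "Is it safe" comment: push the IRAM heap, allocate, then restore the previous selection. umm_push_heap() and umm_pop_heap() are declared in umm_malloc.h below; the wrapper function and include path are illustrative assumptions.

```
#include <stddef.h>
#include <umm_malloc/umm_malloc.h>

void *alloc_from_iram(size_t size) {   /* hypothetical helper */
    void *p = NULL;
    #ifdef UMM_HEAP_IRAM
    umm_push_heap(UMM_HEAP_IRAM);   /* select the IRAM heap context */
    p = umm_malloc(size);           /* an ISR here would still be served DRAM */
    umm_pop_heap();                 /* put the heap back the way it was */
    #endif
    return p;
}
```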
 
 /* ------------------------------------------------------------------------ */
 
-void *umm_realloc( void *ptr, size_t size ) {
-  UMM_CRITICAL_DECL(id_realloc);
+void *umm_realloc(void *ptr, size_t size) {
+    UMM_CRITICAL_DECL(id_realloc);
 
-  uint16_t blocks;
-  uint16_t blockSize;
-  uint16_t prevBlockSize = 0;
-  uint16_t nextBlockSize = 0;
+    uint16_t blocks;
+    uint16_t blockSize;
+    uint16_t prevBlockSize = 0;
+    uint16_t nextBlockSize = 0;
 
-  uint16_t c;
+    uint16_t c;
 
-  size_t curSize;
+    size_t curSize;
 
-  UMM_INIT_HEAP;
+    UMM_INIT_HEAP;
 
-  /*
-   * This code looks after the case of a NULL value for ptr. The ANSI C
-   * standard says that if ptr is NULL and size is non-zero, then we've
-   * got to work the same a malloc(). If size is also 0, then our version
-   * of malloc() returns a NULL pointer, which is OK as far as the ANSI C
-   * standard is concerned.
-   */
+    /*
+     * This code looks after the case of a NULL value for ptr. The ANSI C
+     * standard says that if ptr is NULL and size is non-zero, then we've
+     * got to work the same as malloc(). If size is also 0, then our version
+     * of malloc() returns a NULL pointer, which is OK as far as the ANSI C
+     * standard is concerned.
+     */
 
-  if( ((void *)NULL == ptr) ) {
-    DBGLOG_DEBUG( "realloc the NULL pointer - call malloc()\n" );
+    if (((void *)NULL == ptr)) {
+        DBGLOG_DEBUG("realloc the NULL pointer - call malloc()\n");
 
-    return( umm_malloc(size) );
-  }
+        return umm_malloc(size);
+    }
 
-  /*
-   * Now we're sure that we have a non_NULL ptr, but we're not sure what
-   * we should do with it. If the size is 0, then the ANSI C standard says that
-   * we should operate the same as free.
-   */
+    /*
+     * Now we're sure that we have a non-NULL ptr, but we're not sure what
+     * we should do with it. If the size is 0, then the ANSI C standard says that
+     * we should operate the same as free.
+     */
 
-  /* Need to be in the heap in which this block lives */
-  umm_heap_context_t *_context = umm_get_ptr_context( ptr );
-  if (NULL == _context) {
-    panic();
-    return NULL;
-  }
+    /* Need to be in the heap in which this block lives */
+    umm_heap_context_t *_context = umm_get_ptr_context(ptr);
+    if (NULL == _context) {
+        panic();
+        return NULL;
+    }
 
-  if( 0 == size ) {
-    DBGLOG_DEBUG( "realloc to 0 size, just free the block\n" );
-    STATS__ZERO_ALLOC_REQUEST(id_realloc, size);
+    if (0 == size) {
+        DBGLOG_DEBUG("realloc to 0 size, just free the block\n");
+        STATS__ZERO_ALLOC_REQUEST(id_realloc, size);
 
-    umm_free( ptr );
+        umm_free(ptr);
 
-    return( (void *)NULL );
-  }
+        return (void *)NULL;
+    }
 
-  STATS__ALLOC_REQUEST(id_realloc, size);
+    STATS__ALLOC_REQUEST(id_realloc, size);
 
-  /*
-   * Otherwise we need to actually do a reallocation. A naiive approach
-   * would be to malloc() a new block of the correct size, copy the old data
-   * to the new block, and then free the old block.
-   *
-   * While this will work, we end up doing a lot of possibly unnecessary
-   * copying. So first, let's figure out how many blocks we'll need.
-   */
+    /*
+     * Otherwise we need to actually do a reallocation. A naive approach
+     * would be to malloc() a new block of the correct size, copy the old data
+     * to the new block, and then free the old block.
+     *
+     * While this will work, we end up doing a lot of possibly unnecessary
+     * copying. So first, let's figure out how many blocks we'll need.
+     */
 
-  blocks = umm_blocks( size );
+    blocks = umm_blocks(size);
 
-  /* Figure out which block we're in. Note the use of truncated division... */
+    /* Figure out which block we're in. Note the use of truncated division... */
 
-  c = (((uintptr_t)ptr)-(uintptr_t)(&(_context->heap[0])))/sizeof(umm_block);
+    c = (((uintptr_t)ptr) - (uintptr_t)(&(_context->heap[0]))) / sizeof(umm_block);
 
-  /* Figure out how big this block is ... the free bit is not set :-) */
+    /* Figure out how big this block is ... the free bit is not set :-) */
 
-  blockSize = (UMM_NBLOCK(c) - c);
+    blockSize = (UMM_NBLOCK(c) - c);
 
-  /* Figure out how many bytes are in this block */
+    /* Figure out how many bytes are in this block */
 
-  curSize   = (blockSize*sizeof(umm_block))-(sizeof(((umm_block *)0)->header));
+    curSize = (blockSize * sizeof(umm_block)) - (sizeof(((umm_block *)0)->header));
 
-  /* Protect the critical section... */
-  UMM_CRITICAL_ENTRY(id_realloc);
+    /* Protect the critical section... */
+    UMM_CRITICAL_ENTRY(id_realloc);
 
-  /* Now figure out if the previous and/or next blocks are free as well as
-   * their sizes - this will help us to minimize special code later when we
-   * decide if it's possible to use the adjacent blocks.
-   *
-   * We set prevBlockSize and nextBlockSize to non-zero values ONLY if they
-   * are free!
-   */
+    /* Now figure out if the previous and/or next blocks are free as well as
+     * their sizes - this will help us to minimize special code later when we
+     * decide if it's possible to use the adjacent blocks.
+     *
+     * We set prevBlockSize and nextBlockSize to non-zero values ONLY if they
+     * are free!
+     */
 
-  if ((UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK)) {
-      nextBlockSize = (UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK) - UMM_NBLOCK(c);
-  }
+    if ((UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK)) {
+        nextBlockSize = (UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK) - UMM_NBLOCK(c);
+    }
 
-  if ((UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK)) {
-      prevBlockSize = (c - UMM_PBLOCK(c));
-  }
+    if ((UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK)) {
+        prevBlockSize = (c - UMM_PBLOCK(c));
+    }
 
-  DBGLOG_DEBUG( "realloc blocks %d blockSize %d nextBlockSize %d prevBlockSize %d\n", blocks, blockSize, nextBlockSize, prevBlockSize );
+    DBGLOG_DEBUG("realloc blocks %d blockSize %d nextBlockSize %d prevBlockSize %d\n", blocks, blockSize, nextBlockSize, prevBlockSize);
 
-//C With each upstream update this section should be reevaluated.
+// C With each upstream update this section should be reevaluated.
 /*C
  *
  * The `#if defined(UMM_REALLOC_MINIMIZE_COPY)` section tracks the content of
@@ -976,107 +977,107 @@ void *umm_realloc( void *ptr, size_t size ) {
  * confirm; however, I think this is the best option when considering the
  * number of reallocs that can occur with the Strings library.
  */
-#if defined(UMM_REALLOC_MINIMIZE_COPY)
-  /*
-   * Ok, now that we're here we know how many blocks we want and the current
-   * blockSize. The prevBlockSize and nextBlockSize are set and we can figure
-   * out the best strategy for the new allocation as follows:
-   *
-   * 1. If the new block is the same size or smaller than the current block do
-   *    nothing.
-   * 2. If the next block is free and adding it to the current block gives us
-   *    EXACTLY enough memory, assimilate the next block. This avoids unwanted
-   *    fragmentation of free memory.
-   *
-   * The following cases may be better handled with memory copies to reduce
-   * fragmentation
-   *
-   * 3. If the previous block is NOT free and the next block is free and
-   *    adding it to the current block gives us enough memory, assimilate
-   *    the next block. This may introduce a bit of fragmentation.
-   * 4. If the prev block is free and adding it to the current block gives us
-   *    enough memory, remove the previous block from the free list, assimilate
-   *    it, copy to the new block.
-   * 5. If the prev and next blocks are free and adding them to the current
-   *    block gives us enough memory, assimilate the next block, remove the
-   *    previous block from the free list, assimilate it, copy to the new block.
-   * 6. Otherwise try to allocate an entirely new block of memory. If the
-   *    allocation works free the old block and return the new pointer. If
-   *    the allocation fails, return NULL and leave the old block intact.
-   *
-   * TODO: Add some conditional code to optimise for less fragmentation
-   *       by simply allocating new memory if we need to copy anyways.
-   *
-   * All that's left to do is decide if the fit was exact or not. If the fit
-   * was not exact, then split the memory block so that we use only the requested
-   * number of blocks and add what's left to the free list.
-   */
+    #if defined(UMM_REALLOC_MINIMIZE_COPY)
+    /*
+     * Ok, now that we're here we know how many blocks we want and the current
+     * blockSize. The prevBlockSize and nextBlockSize are set and we can figure
+     * out the best strategy for the new allocation as follows:
+     *
+     * 1. If the new block is the same size or smaller than the current block do
+     *    nothing.
+     * 2. If the next block is free and adding it to the current block gives us
+     *    EXACTLY enough memory, assimilate the next block. This avoids unwanted
+     *    fragmentation of free memory.
+     *
+     * The following cases may be better handled with memory copies to reduce
+     * fragmentation
+     *
+     * 3. If the previous block is NOT free and the next block is free and
+     *    adding it to the current block gives us enough memory, assimilate
+     *    the next block. This may introduce a bit of fragmentation.
+     * 4. If the prev block is free and adding it to the current block gives us
+     *    enough memory, remove the previous block from the free list, assimilate
+     *    it, copy to the new block.
+     * 5. If the prev and next blocks are free and adding them to the current
+     *    block gives us enough memory, assimilate the next block, remove the
+     *    previous block from the free list, assimilate it, copy to the new block.
+     * 6. Otherwise try to allocate an entirely new block of memory. If the
+     *    allocation works free the old block and return the new pointer. If
+     *    the allocation fails, return NULL and leave the old block intact.
+     *
+     * TODO: Add some conditional code to optimise for less fragmentation
+     *       by simply allocating new memory if we need to copy anyways.
+     *
+     * All that's left to do is decide if the fit was exact or not. If the fit
+     * was not exact, then split the memory block so that we use only the requested
+     * number of blocks and add what's left to the free list.
+     */
 
     //  Case 1 - block is same size or smaller
     if (blockSize >= blocks) {
-        DBGLOG_DEBUG( "realloc the same or smaller size block - %i, do nothing\n", blocks );
+        DBGLOG_DEBUG("realloc the same or smaller size block - %i, do nothing\n", blocks);
         /* This space intentionally left blank */
 
-    //  Case 2 - block + next block fits EXACTLY
+        //  Case 2 - block + next block fits EXACTLY
     } else if ((blockSize + nextBlockSize) == blocks) {
-        DBGLOG_DEBUG( "exact realloc using next block - %i\n", blocks );
-        umm_assimilate_up( c );
-        STATS__FREE_BLOCKS_UPDATE( - nextBlockSize );
+        DBGLOG_DEBUG("exact realloc using next block - %i\n", blocks);
+        umm_assimilate_up(_context, c);
+        STATS__FREE_BLOCKS_UPDATE(-nextBlockSize);
         blockSize += nextBlockSize;
 
-    //  Case 3 - prev block NOT free and block + next block fits
+        //  Case 3 - prev block NOT free and block + next block fits
     } else if ((0 == prevBlockSize) && (blockSize + nextBlockSize) >= blocks) {
-        DBGLOG_DEBUG( "realloc using next block - %i\n", blocks );
-        umm_assimilate_up( _context, c );
-        STATS__FREE_BLOCKS_UPDATE( - nextBlockSize );
+        DBGLOG_DEBUG("realloc using next block - %i\n", blocks);
+        umm_assimilate_up(_context, c);
+        STATS__FREE_BLOCKS_UPDATE(-nextBlockSize);
         blockSize += nextBlockSize;
 
-    //  Case 4 - prev block + block fits
+        //  Case 4 - prev block + block fits
     } else if ((prevBlockSize + blockSize) >= blocks) {
-        DBGLOG_DEBUG( "realloc using prev block - %i\n", blocks );
-        umm_disconnect_from_free_list( _context, UMM_PBLOCK(c) );
+        DBGLOG_DEBUG("realloc using prev block - %i\n", blocks);
+        umm_disconnect_from_free_list(_context, UMM_PBLOCK(c));
         c = umm_assimilate_down(_context, c, 0);
-        STATS__FREE_BLOCKS_UPDATE( - prevBlockSize );
+        STATS__FREE_BLOCKS_UPDATE(-prevBlockSize);
         STATS__FREE_BLOCKS_ISR_MIN();
         blockSize += prevBlockSize;
         UMM_CRITICAL_SUSPEND(id_realloc);
-        memmove( (void *)&UMM_DATA(c), ptr, curSize );
+        memmove((void *)&UMM_DATA(c), ptr, curSize);
         ptr = (void *)&UMM_DATA(c);
         UMM_CRITICAL_RESUME(id_realloc);
-    //  Case 5 - prev block + block + next block fits
+        //  Case 5 - prev block + block + next block fits
     } else if ((prevBlockSize + blockSize + nextBlockSize) >= blocks) {
-        DBGLOG_DEBUG( "realloc using prev and next block - %d\n", blocks );
-        umm_assimilate_up( _context, c );
-        umm_disconnect_from_free_list( _context, UMM_PBLOCK(c) );
+        DBGLOG_DEBUG("realloc using prev and next block - %d\n", blocks);
+        umm_assimilate_up(_context, c);
+        umm_disconnect_from_free_list(_context, UMM_PBLOCK(c));
         c = umm_assimilate_down(_context, c, 0);
-        STATS__FREE_BLOCKS_UPDATE( - prevBlockSize - nextBlockSize );
-#ifdef UMM_LIGHTWEIGHT_CPU
+        STATS__FREE_BLOCKS_UPDATE(-prevBlockSize - nextBlockSize);
+        #ifdef UMM_LIGHTWEIGHT_CPU
         if ((prevBlockSize + blockSize + nextBlockSize) > blocks) {
-            umm_split_block( _context, c, blocks, 0 );
-            umm_free_core( _context, (void *)&UMM_DATA(c+blocks) );
+            umm_split_block(_context, c, blocks, 0);
+            umm_free_core(_context, (void *)&UMM_DATA(c + blocks));
         }
         STATS__FREE_BLOCKS_ISR_MIN();
         blockSize = blocks;
-#else
+        #else
         blockSize += (prevBlockSize + nextBlockSize);
-#endif
+        #endif
         UMM_CRITICAL_SUSPEND(id_realloc);
-        memmove( (void *)&UMM_DATA(c), ptr, curSize );
+        memmove((void *)&UMM_DATA(c), ptr, curSize);
         ptr = (void *)&UMM_DATA(c);
         UMM_CRITICAL_RESUME(id_realloc);
 
-    //  Case 6 - default is we need to realloc a new block
+        //  Case 6 - default is we need to realloc a new block
     } else {
-        DBGLOG_DEBUG( "realloc a completely new block %i\n", blocks );
+        DBGLOG_DEBUG("realloc a completely new block %i\n", blocks);
         void *oldptr = ptr;
-        if( (ptr = umm_malloc_core( _context, size )) ) {
-            DBGLOG_DEBUG( "realloc %i to a bigger block %i, copy, and free the old\n", blockSize, blocks );
+        if ((ptr = umm_malloc_core(_context, size))) {
+            DBGLOG_DEBUG("realloc %i to a bigger block %i, copy, and free the old\n", blockSize, blocks);
             UMM_CRITICAL_SUSPEND(id_realloc);
-            memcpy( ptr, oldptr, curSize );
+            memcpy(ptr, oldptr, curSize);
             UMM_CRITICAL_RESUME(id_realloc);
-            umm_free_core( _context, oldptr );
+            umm_free_core(_context, oldptr);
         } else {
-            DBGLOG_DEBUG( "realloc %i to a bigger block %i failed - return NULL and leave the old block!\n", blockSize, blocks );
+            DBGLOG_DEBUG("realloc %i to a bigger block %i failed - return NULL and leave the old block!\n", blockSize, blocks);
             /* This space intentionally left blank */
             /* STATS__OOM_UPDATE() has already been called by umm_malloc_core - don't duplicate count */
         }
@@ -1085,77 +1086,77 @@ void *umm_realloc( void *ptr, size_t size ) {
          */
         blockSize = blocks;
     }
-#elif defined(UMM_REALLOC_DEFRAG)
-  /*
-   * Ok, now that we're here we know how many blocks we want and the current
-   * blockSize. The prevBlockSize and nextBlockSize are set and we can figure
-   * out the best strategy for the new allocation. The following strategy is
-   * focused on defragging the heap:
-   *
-   * 1. If the prev is free and adding it to the current, or current and next
-   *    block, gives us enough memory, proceed. Note, that next block may not
-   *    be available.
-   *    a. Remove the previous block from the free list, assimilate it.
-   *    b. If this new block gives enough memory, copy to the new block.
-   *       Note, this includes the case of same size or smaller block.
-   *    c. Else assimilate the next block, copy to the new block.
-   * 2. If the new block is the same size or smaller than the current block do
-   *    nothing.
-   * 3. If the next block is free and adding it to the current block gives us
-   *    enough memory, assimilate the next block.
-   * 4. Otherwise try to allocate an entirely new block of memory. If the
-   *    allocation works free the old block and return the new pointer. If
-   *    the allocation fails, return NULL and leave the old block intact.
-   *
-   * All that's left to do is decide if the fit was exact or not. If the fit
-   * was not exact, then split the memory block so that we use only the
-   * requested number of blocks and add what's left to the free list.
-   */
-   if (prevBlockSize && (prevBlockSize + blockSize + nextBlockSize) >= blocks) { // 1
-        umm_disconnect_from_free_list( _context, UMM_PBLOCK(c) );
-        c = umm_assimilate_down( _context, c, 0 );
-        STATS__FREE_BLOCKS_UPDATE( - prevBlockSize );
+    #elif defined(UMM_REALLOC_DEFRAG)
+    /*
+     * Ok, now that we're here we know how many blocks we want and the current
+     * blockSize. The prevBlockSize and nextBlockSize are set and we can figure
+     * out the best strategy for the new allocation. The following strategy is
+     * focused on defragging the heap:
+     *
+     * 1. If the prev is free and adding it to the current, or current and next
+     *    block, gives us enough memory, proceed. Note that the next block may not
+     *    be available.
+     *    a. Remove the previous block from the free list, assimilate it.
+     *    b. If this new block gives enough memory, copy to the new block.
+     *       Note, this includes the case of same size or smaller block.
+     *    c. Else assimilate the next block, copy to the new block.
+     * 2. If the new block is the same size or smaller than the current block do
+     *    nothing.
+     * 3. If the next block is free and adding it to the current block gives us
+     *    enough memory, assimilate the next block.
+     * 4. Otherwise try to allocate an entirely new block of memory. If the
+     *    allocation works free the old block and return the new pointer. If
+     *    the allocation fails, return NULL and leave the old block intact.
+     *
+     * All that's left to do is decide if the fit was exact or not. If the fit
+     * was not exact, then split the memory block so that we use only the
+     * requested number of blocks and add what's left to the free list.
+     */
+    if (prevBlockSize && (prevBlockSize + blockSize + nextBlockSize) >= blocks) { // 1
+        umm_disconnect_from_free_list(_context, UMM_PBLOCK(c));
+        c = umm_assimilate_down(_context, c, 0);
+        STATS__FREE_BLOCKS_UPDATE(-prevBlockSize);
         blockSize += prevBlockSize;
         if (blockSize >= blocks) {
-            DBGLOG_DEBUG( "realloc using prev block - %d\n", blocks );
+            DBGLOG_DEBUG("realloc using prev block - %d\n", blocks);
             STATS__FREE_BLOCKS_ISR_MIN();
         } else {
-            DBGLOG_DEBUG( "realloc using prev and next block - %d\n", blocks );
-            umm_assimilate_up( _context, c );
-            STATS__FREE_BLOCKS_UPDATE( - nextBlockSize );
+            DBGLOG_DEBUG("realloc using prev and next block - %d\n", blocks);
+            umm_assimilate_up(_context, c);
+            STATS__FREE_BLOCKS_UPDATE(-nextBlockSize);
             blockSize += nextBlockSize;
-#ifdef UMM_LIGHTWEIGHT_CPU
+            #ifdef UMM_LIGHTWEIGHT_CPU
             if (blockSize > blocks) {
-                umm_split_block( _context, c, blocks, 0 );
-                umm_free_core( _context, (void *)&UMM_DATA(c+blocks) );
+                umm_split_block(_context, c, blocks, 0);
+                umm_free_core(_context, (void *)&UMM_DATA(c + blocks));
             }
             STATS__FREE_BLOCKS_ISR_MIN();
             blockSize = blocks;
-#endif
+            #endif
         }
         UMM_CRITICAL_SUSPEND(id_realloc);
-        memmove( (void *)&UMM_DATA(c), ptr, curSize );
+        memmove((void *)&UMM_DATA(c), ptr, curSize);
         ptr = (void *)&UMM_DATA(c);
         UMM_CRITICAL_RESUME(id_realloc);
     } else if (blockSize >= blocks) { // 2
-        DBGLOG_DEBUG( "realloc the same or smaller size block - %d, do nothing\n", blocks );
+        DBGLOG_DEBUG("realloc the same or smaller size block - %d, do nothing\n", blocks);
         /* This space intentionally left blank */
     } else if ((blockSize + nextBlockSize) >= blocks) { // 3
-        DBGLOG_DEBUG( "realloc using next block - %d\n", blocks );
-        umm_assimilate_up( _context, c );
-        STATS__FREE_BLOCKS_UPDATE( - nextBlockSize );
+        DBGLOG_DEBUG("realloc using next block - %d\n", blocks);
+        umm_assimilate_up(_context, c);
+        STATS__FREE_BLOCKS_UPDATE(-nextBlockSize);
         blockSize += nextBlockSize;
     } else { // 4
-        DBGLOG_DEBUG( "realloc a completely new block %d\n", blocks );
+        DBGLOG_DEBUG("realloc a completely new block %d\n", blocks);
         void *oldptr = ptr;
-        if( (ptr = umm_malloc_core( _context, size )) ) {
-            DBGLOG_DEBUG( "realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks );
+        if ((ptr = umm_malloc_core(_context, size))) {
+            DBGLOG_DEBUG("realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks);
             UMM_CRITICAL_SUSPEND(id_realloc);
-            memcpy( ptr, oldptr, curSize );
+            memcpy(ptr, oldptr, curSize);
             UMM_CRITICAL_RESUME(id_realloc);
-            umm_free_core( _context, oldptr);
+            umm_free_core(_context, oldptr);
         } else {
-            DBGLOG_DEBUG( "realloc %d to a bigger block %d failed - return NULL and leave the old block!\n", blockSize, blocks );
+            DBGLOG_DEBUG("realloc %d to a bigger block %d failed - return NULL and leave the old block!\n", blockSize, blocks);
             /* This space intentionally left blank */
             /* STATS__OOM_UPDATE() has already been called by umm_malloc_core - don't duplicate count */
         }
@@ -1164,23 +1165,23 @@ void *umm_realloc( void *ptr, size_t size ) {
          */
         blockSize = blocks;
     }
-#else
-#warning "Neither UMM_REALLOC_DEFRAG nor UMM_REALLOC_MINIMIZE_COPY is defined - check umm_malloc_cfg.h"
+    #else
+    #warning "Neither UMM_REALLOC_DEFRAG nor UMM_REALLOC_MINIMIZE_COPY is defined - check umm_malloc_cfg.h"
     /* An always copy option just for performance/fragmentation comparison */
     if (blockSize >= blocks) {
-        DBGLOG_DEBUG( "realloc the same or smaller size block - %d, do nothing\n", blocks );
+        DBGLOG_DEBUG("realloc the same or smaller size block - %d, do nothing\n", blocks);
         /* This space intentionally left blank */
     } else {
-        DBGLOG_DEBUG( "realloc a completely new block %d\n", blocks );
+        DBGLOG_DEBUG("realloc a completely new block %d\n", blocks);
         void *oldptr = ptr;
-        if( (ptr = umm_malloc_core( _context, size )) ) {
-            DBGLOG_DEBUG( "realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks );
+        if ((ptr = umm_malloc_core(_context, size))) {
+            DBGLOG_DEBUG("realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks);
             UMM_CRITICAL_SUSPEND(id_realloc);
-            memcpy( ptr, oldptr, curSize );
+            memcpy(ptr, oldptr, curSize);
             UMM_CRITICAL_RESUME(id_realloc);
-            umm_free_core( _context, oldptr );
+            umm_free_core(_context, oldptr);
         } else {
-            DBGLOG_DEBUG( "realloc %d to a bigger block %d failed - return NULL and leave the old block!\n", blockSize, blocks );
+            DBGLOG_DEBUG("realloc %d to a bigger block %d failed - return NULL and leave the old block!\n", blockSize, blocks);
             /* This space intentionally left blank */
             /* STATS__OOM_UPDATE() has already been called by umm_malloc_core - don't duplicate count */
         }
@@ -1189,15 +1190,15 @@ void *umm_realloc( void *ptr, size_t size ) {
          */
         blockSize = blocks;
     }
-#endif
+    #endif
     /* Now all we need to do is figure out if the block fit exactly or if we
      * need to split and free ...
      */
 
-    if (blockSize > blocks ) {
-        DBGLOG_DEBUG( "split and free %d blocks from %d\n", blocks, blockSize );
-        umm_split_block( _context, c, blocks, 0 );
-        umm_free_core( _context, (void *)&UMM_DATA(c+blocks) );
+    if (blockSize > blocks) {
+        DBGLOG_DEBUG("split and free %d blocks from %d\n", blocks, blockSize);
+        umm_split_block(_context, c, blocks, 0);
+        umm_free_core(_context, (void *)&UMM_DATA(c + blocks));
     }
 
     STATS__FREE_BLOCKS_MIN();
@@ -1205,20 +1206,21 @@ void *umm_realloc( void *ptr, size_t size ) {
     /* Release the critical section... */
     UMM_CRITICAL_EXIT(id_realloc);
 
-    return( ptr );
+    return ptr;
 }
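
For reference, the ANSI edge cases umm_realloc() handles above, seen from the caller's side (the wrapper is illustrative):

```
#include <umm_malloc/umm_malloc.h>

void realloc_edge_cases(void) {
    void *p = umm_realloc(NULL, 32);   /* NULL ptr: behaves like umm_malloc(32) */
    p = umm_realloc(p, 64);            /* grow: the block may move; contents are copied */
    p = umm_realloc(p, 0);             /* size 0: frees the block and returns NULL */
    (void)p;
}
```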
 
 /* ------------------------------------------------------------------------ */
 
-void *umm_calloc( size_t num, size_t item_size ) {
-  void *ret;
+void *umm_calloc(size_t num, size_t item_size) {
+    void *ret;
 
-  ret = umm_malloc((size_t)(item_size * num));
+    ret = umm_malloc((size_t)(item_size * num));
 
-  if (ret)
-      memset(ret, 0x00, (size_t)(item_size * num));
+    if (ret) {
+        memset(ret, 0x00, (size_t)(item_size * num));
+    }
 
-  return ret;
+    return ret;
 }
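
Note that the num * item_size multiplication in umm_calloc() is unchecked, matching upstream, so a huge product can wrap and return an undersized block. A caller-side guard is a cheap defense; checked_calloc() below is a hypothetical helper, not part of this port:

```
#include <stddef.h>
#include <umm_malloc/umm_malloc.h>

void *checked_calloc(size_t num, size_t item_size) {
    /* Reject requests where num * item_size would overflow size_t. */
    if (item_size != 0 && num > (size_t)-1 / item_size) {
        return NULL;
    }
    return umm_calloc(num, item_size);
}
```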
 
 /* ------------------------------------------------------------------------ */
diff --git a/cores/esp8266/umm_malloc/umm_malloc.h b/cores/esp8266/umm_malloc/umm_malloc.h
index f7dcc72c1b..d3e3ace561 100644
--- a/cores/esp8266/umm_malloc/umm_malloc.h
+++ b/cores/esp8266/umm_malloc/umm_malloc.h
@@ -10,7 +10,7 @@
 
 #include <stdint.h>
 
-//C This include is not in upstream
+// C This include is not in upstream
 #include "umm_malloc_cfg.h"   /* user-dependent */
 
 #ifdef __cplusplus
@@ -19,28 +19,28 @@ extern "C" {
 
 
 #ifdef UMM_HEAP_EXTERNAL
-extern void  umm_init_vm( void *vmaddr, unsigned int vmsize );
+extern void  umm_init_vm(void *vmaddr, unsigned int vmsize);
 #endif
 #ifdef UMM_HEAP_IRAM
 extern void umm_init_iram(void);
-extern void umm_init_iram_ex( void *addr, unsigned int size, bool zero );
+extern void umm_init_iram_ex(void *addr, unsigned int size, bool zero);
 #endif
 /* ------------------------------------------------------------------------ */
 
-extern void  umm_init( void );
-extern void *umm_malloc( size_t size );
-extern void *umm_calloc( size_t num, size_t size );
-extern void *umm_realloc( void *ptr, size_t size );
-extern void  umm_free( void *ptr );
+extern void  umm_init(void);
+extern void *umm_malloc(size_t size);
+extern void *umm_calloc(size_t num, size_t size);
+extern void *umm_realloc(void *ptr, size_t size);
+extern void  umm_free(void *ptr);
 
 /* ------------------------------------------------------------------------ */
 
-extern umm_heap_context_t *umm_push_heap( size_t heap_number );
-extern umm_heap_context_t *umm_pop_heap( void );
-extern int umm_get_heap_stack_index( void );
-extern umm_heap_context_t *umm_set_heap_by_id( size_t which );
-extern size_t umm_get_current_heap_id( void );
-extern umm_heap_context_t *umm_get_current_heap( void );
+extern umm_heap_context_t *umm_push_heap(size_t heap_number);
+extern umm_heap_context_t *umm_pop_heap(void);
+extern int umm_get_heap_stack_index(void);
+extern umm_heap_context_t *umm_set_heap_by_id(size_t which);
+extern size_t umm_get_current_heap_id(void);
+extern umm_heap_context_t *umm_get_current_heap(void);
 
 #ifdef __cplusplus
 }
diff --git a/cores/esp8266/umm_malloc/umm_malloc_cfg.h b/cores/esp8266/umm_malloc/umm_malloc_cfg.h
index 9b1d0ba568..3ec44c1f40 100644
--- a/cores/esp8266/umm_malloc/umm_malloc_cfg.h
+++ b/cores/esp8266/umm_malloc/umm_malloc_cfg.h
@@ -192,13 +192,13 @@ extern char _heap_start[];
 /* -------------------------------------------------------------------------- */
 
 #ifdef UMM_BEST_FIT
-  #ifdef  UMM_FIRST_FIT
-    #error Both UMM_BEST_FIT and UMM_FIRST_FIT are defined - pick one!
-  #endif
+#ifdef  UMM_FIRST_FIT
+#error Both UMM_BEST_FIT and UMM_FIRST_FIT are defined - pick one!
+#endif
 #else /* UMM_BEST_FIT is not defined */
-  #ifndef UMM_FIRST_FIT
+#ifndef UMM_FIRST_FIT
     #define UMM_BEST_FIT
-  #endif
+#endif
 #endif
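
The block above enforces the fit-strategy choice: defining both UMM_BEST_FIT and UMM_FIRST_FIT is an error, and defining neither falls back to UMM_BEST_FIT. A minimal sketch of selecting first-fit; the build-flag spelling is an assumption:

```
/* e.g. as a compiler define: -DUMM_FIRST_FIT
 * or directly in the configuration header, ahead of the check above: */
#define UMM_FIRST_FIT   /* leave both undefined to default to UMM_BEST_FIT */
```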
 
 /* -------------------------------------------------------------------------- */
@@ -207,9 +207,9 @@ extern char _heap_start[];
   #define UMM_FRAGMENTATION_METRIC_INIT() umm_fragmentation_metric_init(_context)
   #define UMM_FRAGMENTATION_METRIC_ADD(c) umm_fragmentation_metric_add(_context, c)
   #define UMM_FRAGMENTATION_METRIC_REMOVE(c) umm_fragmentation_metric_remove(_context, c)
-  #ifndef UMM_INFO
+#ifndef UMM_INFO
   #define UMM_INFO
-  #endif
+#endif
 #else
   #define UMM_FRAGMENTATION_METRIC_INIT()
   #define UMM_FRAGMENTATION_METRIC_ADD(c)
@@ -229,7 +229,7 @@ extern char _heap_start[];
 // #define UMM_INFO
 
 #ifdef UMM_INFO
-  typedef struct UMM_HEAP_INFO_t {
+typedef struct UMM_HEAP_INFO_t {
     unsigned int totalEntries;
     unsigned int usedEntries;
     unsigned int freeEntries;
@@ -238,33 +238,33 @@ extern char _heap_start[];
     unsigned int usedBlocks;
     unsigned int freeBlocks;
     unsigned int freeBlocksSquared;
-#ifdef UMM_INLINE_METRICS
+    #ifdef UMM_INLINE_METRICS
     size_t oom_count;
     #define UMM_OOM_COUNT info.oom_count
     #define UMM_FREE_BLOCKS info.freeBlocks
-#endif
+    #endif
     unsigned int maxFreeContiguousBlocks;
-  }
-  UMM_HEAP_INFO;
+}
+UMM_HEAP_INFO;
 
-  // extern UMM_HEAP_INFO ummHeapInfo;
+// extern UMM_HEAP_INFO ummHeapInfo;
 struct UMM_HEAP_CONTEXT;
 typedef struct UMM_HEAP_CONTEXT umm_heap_context_t;
 
-  extern ICACHE_FLASH_ATTR void *umm_info( void *ptr, bool force );
+extern ICACHE_FLASH_ATTR void *umm_info(void *ptr, bool force);
 #ifdef UMM_INLINE_METRICS
-  extern size_t umm_free_heap_size( void );
+extern size_t umm_free_heap_size(void);
 #else
-  extern ICACHE_FLASH_ATTR size_t umm_free_heap_size( void );
-#endif
-  // umm_max_block_size changed to umm_max_free_block_size in upstream.
-  extern ICACHE_FLASH_ATTR size_t umm_max_block_size( void );
-  extern ICACHE_FLASH_ATTR int umm_usage_metric( void );
-  extern ICACHE_FLASH_ATTR int umm_fragmentation_metric( void );
-  extern ICACHE_FLASH_ATTR size_t umm_free_heap_size_core( umm_heap_context_t *_context );
-  extern ICACHE_FLASH_ATTR size_t umm_max_block_size_core( umm_heap_context_t *_context );
-  extern ICACHE_FLASH_ATTR int umm_usage_metric_core( umm_heap_context_t *_context );
-  extern ICACHE_FLASH_ATTR int umm_fragmentation_metric_core( umm_heap_context_t *_context );
+extern ICACHE_FLASH_ATTR size_t umm_free_heap_size(void);
+#endif
+// umm_max_block_size changed to umm_max_free_block_size in upstream.
+extern ICACHE_FLASH_ATTR size_t umm_max_block_size(void);
+extern ICACHE_FLASH_ATTR int umm_usage_metric(void);
+extern ICACHE_FLASH_ATTR int umm_fragmentation_metric(void);
+extern ICACHE_FLASH_ATTR size_t umm_free_heap_size_core(umm_heap_context_t *_context);
+extern ICACHE_FLASH_ATTR size_t umm_max_block_size_core(umm_heap_context_t *_context);
+extern ICACHE_FLASH_ATTR int umm_usage_metric_core(umm_heap_context_t *_context);
+extern ICACHE_FLASH_ATTR int umm_fragmentation_metric_core(umm_heap_context_t *_context);
 #else
   #define umm_info(p,b)
   #define umm_free_heap_size() (0)
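When UMM_INFO is not defined, the #else branch above reduces these accessors to no-ops, so call sites cost nothing. A minimal reporting sketch, assuming UMM_INFO is enabled:
```
// Hedged sketch: print the free-space and fragmentation metrics
// declared above (compiled in only with UMM_INFO).
void report_heap(void) {
    DBGLOG_FUNCTION("free: %u  largest: %u  frag: %d%%\n",
        (unsigned)umm_free_heap_size(),
        (unsigned)umm_max_block_size(),
        umm_fragmentation_metric());
}
```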
@@ -312,27 +312,27 @@ typedef struct UMM_HEAP_CONTEXT umm_heap_context_t;
 #if defined(UMM_STATS) || defined(UMM_STATS_FULL)
 
 typedef struct UMM_STATISTICS_t {
-#ifndef UMM_INLINE_METRICS
+    #ifndef UMM_INLINE_METRICS
 // If we are doing UMM_INLINE_METRICS, we can move oom_count and free_blocks to
 // umm_info's structure and save a little DRAM and IRAM.
 // Otherwise it is defined here.
-  size_t free_blocks;
-  size_t oom_count;
+    size_t free_blocks;
+    size_t oom_count;
   #define UMM_OOM_COUNT stats.oom_count
   #define UMM_FREE_BLOCKS stats.free_blocks
-#endif
-#ifdef UMM_STATS_FULL
-  size_t free_blocks_min;
-  size_t free_blocks_isr_min;
-  size_t alloc_max_size;
-  size_t last_alloc_size;
-  size_t id_malloc_count;
-  size_t id_malloc_zero_count;
-  size_t id_realloc_count;
-  size_t id_realloc_zero_count;
-  size_t id_free_count;
-  size_t id_free_null_count;
-#endif
+    #endif
+    #ifdef UMM_STATS_FULL
+    size_t free_blocks_min;
+    size_t free_blocks_isr_min;
+    size_t alloc_max_size;
+    size_t last_alloc_size;
+    size_t id_malloc_count;
+    size_t id_malloc_zero_count;
+    size_t id_realloc_count;
+    size_t id_realloc_zero_count;
+    size_t id_free_count;
+    size_t id_free_null_count;
+    #endif
 }
 UMM_STATISTICS;
 
@@ -344,8 +344,8 @@ UMM_STATISTICS;
 
 #define STATS__OOM_UPDATE() _context->UMM_OOM_COUNT += 1
 
-extern size_t umm_free_heap_size_lw( void );
-extern size_t umm_get_oom_count( void );
+extern size_t umm_free_heap_size_lw(void);
+extern size_t umm_get_oom_count(void);
 
 #else  // not UMM_STATS or UMM_STATS_FULL
 #define STATS__FREE_BLOCKS_UPDATE(s) (void)(s)
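UMM_STATS maintains the counters above as the allocator runs, so umm_free_heap_size_lw() can read a cached free-block count instead of walking the heap. A sketch of a lightweight out-of-memory monitor built on these accessors:
```
// Hedged sketch: cheap OOM monitoring via the UMM_STATS accessors above.
void check_oom(void) {
    if (umm_get_oom_count() != 0) {
        DBGLOG_FUNCTION("OOM events: %u, free heap now: %u\n",
            (unsigned)umm_get_oom_count(),
            (unsigned)umm_free_heap_size_lw());
    }
}
```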
@@ -353,59 +353,62 @@ extern size_t umm_get_oom_count( void );
 #endif
 
 #if defined(UMM_STATS) || defined(UMM_STATS_FULL) || defined(UMM_INFO)
-size_t ICACHE_FLASH_ATTR umm_block_size( void );
+size_t ICACHE_FLASH_ATTR umm_block_size(void);
 #endif
 
 #ifdef UMM_STATS_FULL
 #define STATS__FREE_BLOCKS_MIN() \
-do { \
-    if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_min) \
-        _context->stats.free_blocks_min = _context->UMM_FREE_BLOCKS; \
-} while(false)
+    do { \
+        if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_min) { \
+            _context->stats.free_blocks_min = _context->UMM_FREE_BLOCKS;  \
+        } \
+    } while (false)
 
 #define STATS__FREE_BLOCKS_ISR_MIN() \
-do { \
-    if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_isr_min) \
-        _context->stats.free_blocks_isr_min = _context->UMM_FREE_BLOCKS; \
-} while(false)
+    do { \
+        if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_isr_min) { \
+            _context->stats.free_blocks_isr_min = _context->UMM_FREE_BLOCKS; \
+        } \
+    } while (false)
 
 #define STATS__ALLOC_REQUEST(tag, s)  \
-do { \
-    _context->stats.tag##_count += 1; \
-    _context->stats.last_alloc_size = s; \
-    if (_context->stats.alloc_max_size < s) \
-        _context->stats.alloc_max_size = s; \
-} while(false)
+    do { \
+        _context->stats.tag##_count += 1; \
+        _context->stats.last_alloc_size = s; \
+        if (_context->stats.alloc_max_size < s) { \
+            _context->stats.alloc_max_size = s; \
+        } \
+    } while (false)
 
 #define STATS__ZERO_ALLOC_REQUEST(tag, s)  \
-do { \
-    _context->stats.tag##_zero_count += 1; \
-} while(false)
+    do { \
+        _context->stats.tag##_zero_count += 1; \
+    } while (false)
 
 #define STATS__NULL_FREE_REQUEST(tag)  \
-do { \
-    umm_heap_context_t *_context = umm_get_current_heap(); \
-    _context->stats.tag##_null_count += 1; \
-} while(false)
+    do { \
+        umm_heap_context_t *_context = umm_get_current_heap(); \
+        _context->stats.tag##_null_count += 1; \
+    } while (false)
 
 #define STATS__FREE_REQUEST(tag)  \
-do { \
-    _context->stats.tag##_count += 1; \
-} while(false)
-
-
-size_t umm_free_heap_size_lw_min( void );
-size_t umm_free_heap_size_min_reset( void );
-size_t umm_free_heap_size_min( void );
-size_t umm_free_heap_size_isr_min( void );
-size_t umm_get_max_alloc_size( void );
-size_t umm_get_last_alloc_size( void );
-size_t umm_get_malloc_count( void );
-size_t umm_get_malloc_zero_count( void );
-size_t umm_get_realloc_count( void );
-size_t umm_get_realloc_zero_count( void );
-size_t umm_get_free_count( void );
-size_t umm_get_free_null_count( void );
+    do { \
+        _context->stats.tag##_count += 1; \
+    } while (false)
+
+
+size_t umm_free_heap_size_lw_min(void);
+size_t umm_free_heap_size_min_reset(void);
+size_t umm_free_heap_size_min(void);
+size_t umm_free_heap_size_isr_min(void);
+size_t umm_get_max_alloc_size(void);
+size_t umm_get_last_alloc_size(void);
+size_t umm_get_malloc_count(void);
+size_t umm_get_malloc_zero_count(void);
+size_t umm_get_realloc_count(void);
+size_t umm_get_realloc_zero_count(void);
+size_t umm_get_free_count(void);
+size_t umm_get_free_null_count(void);
 
 #else // Not UMM_STATS_FULL
 #define STATS__FREE_BLOCKS_MIN()          (void)0
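The braces uncrustify added inside the STATS__* macros above sit within the usual do { ... } while (false) wrapper, which keeps a multi-statement macro behaving as a single statement. A minimal illustration of why the wrapper matters:
```
#include <stdbool.h>

// Without the do/while(false) wrapper, a two-statement macro breaks
// inside an un-braced if: the second statement escapes the condition.
#define BUMP_BOTH(a, b) do { (a)++; (b)++; } while (false)

void demo(bool flag, int *x, int *y) {
    if (flag)
        BUMP_BOTH(*x, *y);  // expands to one statement; an else
                            // could still legally follow this line
}
```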
@@ -448,10 +451,10 @@ size_t umm_get_free_null_count( void );
 // This option adds support for gathering time locked data
 
 typedef struct UMM_TIME_STAT_t {
-  uint32_t min;
-  uint32_t max;
-  uint32_t start;
-  uint32_t intlevel;
+    uint32_t min;
+    uint32_t max;
+    uint32_t start;
+    uint32_t intlevel;
 }
 UMM_TIME_STAT;
 
@@ -462,23 +465,25 @@ extern UMM_TIME_STATS time_stats;
 bool get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size);
 
 static inline void _critical_entry(UMM_TIME_STAT *p, uint32_t *saved_ps) {
-  *saved_ps = xt_rsil(DEFAULT_CRITICAL_SECTION_INTLEVEL);
-  if (0U != (*saved_ps & 0x0FU)) {
-      p->intlevel += 1U;
-  }
+    *saved_ps = xt_rsil(DEFAULT_CRITICAL_SECTION_INTLEVEL);
+    if (0U != (*saved_ps & 0x0FU)) {
+        p->intlevel += 1U;
+    }
 
-  p->start = esp_get_cycle_count();
+    p->start = esp_get_cycle_count();
 }
 
 static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
-  uint32_t elapse = esp_get_cycle_count() - p->start;
-  if (elapse < p->min)
-      p->min = elapse;
+    uint32_t elapse = esp_get_cycle_count() - p->start;
+    if (elapse < p->min) {
+        p->min = elapse;
+    }
 
-  if (elapse > p->max)
-      p->max = elapse;
+    if (elapse > p->max) {
+        p->max = elapse;
+    }
 
-  xt_wsr_ps(*saved_ps);
+    xt_wsr_ps(*saved_ps);
 }
 #endif
 //////////////////////////////////////////////////////////////////////////////////////
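The timing hooks above record elapsed CPU cycles from esp_get_cycle_count(). For reporting, cycle deltas convert to time by the CPU clock; a hedged sketch assuming the default 80 MHz clock:
```
// Hedged sketch: convert the cycle deltas gathered above into
// microseconds. Assumes an 80 MHz CPU clock (divide by 160 at 160 MHz).
static inline uint32_t cycles_to_us(uint32_t cycles) {
    return cycles / 80u;  // 80 cycles per microsecond at 80 MHz
}
```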
@@ -495,33 +500,33 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
  */
 
 #ifdef UMM_TEST_BUILD
-    extern int umm_critical_depth;
-    extern int umm_max_critical_depth;
-    #define UMM_CRITICAL_ENTRY() {\
-          ++umm_critical_depth; \
-          if (umm_critical_depth > umm_max_critical_depth) { \
-              umm_max_critical_depth = umm_critical_depth; \
-          } \
-    }
+extern int umm_critical_depth;
+extern int umm_max_critical_depth;
+    #define UMM_CRITICAL_ENTRY() { \
+        ++umm_critical_depth; \
+        if (umm_critical_depth > umm_max_critical_depth) { \
+            umm_max_critical_depth = umm_critical_depth; \
+        } \
+}
     #define UMM_CRITICAL_EXIT()  (umm_critical_depth--)
 #else
-    #if defined(UMM_CRITICAL_METRICS)
+#if defined(UMM_CRITICAL_METRICS)
         #define UMM_CRITICAL_DECL(tag) uint32_t _saved_ps_##tag
         #define UMM_CRITICAL_ENTRY(tag)_critical_entry(&time_stats.tag, &_saved_ps_##tag)
         #define UMM_CRITICAL_EXIT(tag) _critical_exit(&time_stats.tag, &_saved_ps_##tag)
         #define UMM_CRITICAL_WITHINISR(tag) (0 != (_saved_ps_##tag & 0x0F))
 
-    #else  // ! UMM_CRITICAL_METRICS
-        // This method preserves the intlevel on entry and restores the
-        // original intlevel at exit.
+#else      // ! UMM_CRITICAL_METRICS
+// This method preserves the intlevel on entry and restores the
+// original intlevel at exit.
         #define UMM_CRITICAL_DECL(tag) uint32_t _saved_ps_##tag
         #define UMM_CRITICAL_ENTRY(tag) _saved_ps_##tag = xt_rsil(DEFAULT_CRITICAL_SECTION_INTLEVEL)
         #define UMM_CRITICAL_EXIT(tag) xt_wsr_ps(_saved_ps_##tag)
         #define UMM_CRITICAL_WITHINISR(tag) (0 != (_saved_ps_##tag & 0x0F))
-    #endif
+#endif
 #endif
 
- /*
+/*
   * -D UMM_LIGHTWEIGHT_CPU
   *
   * The use of this macro is hardware/application specific.
@@ -550,8 +555,8 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
 #define UMM_CRITICAL_SUSPEND(tag) UMM_CRITICAL_EXIT(tag)
 #define UMM_CRITICAL_RESUME(tag) UMM_CRITICAL_ENTRY(tag)
 #else
-#define UMM_CRITICAL_SUSPEND(tag) do {} while(0)
-#define UMM_CRITICAL_RESUME(tag) do {} while(0)
+#define UMM_CRITICAL_SUSPEND(tag) do {} while (0)
+#define UMM_CRITICAL_RESUME(tag) do {} while (0)
 #endif
 
 /*
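Expanded, the non-metrics flavor of the macros above is just a PS save/restore around the protected region, and UMM_CRITICAL_SUSPEND/RESUME reopen that window when UMM_LIGHTWEIGHT_CPU requests it. Roughly:
```
// Sketch of what UMM_CRITICAL_DECL/ENTRY/EXIT expand to (tag elided),
// per the non-metrics branch above.
void critical_demo(void) {
    uint32_t _saved_ps;                                      // DECL
    _saved_ps = xt_rsil(DEFAULT_CRITICAL_SECTION_INTLEVEL);  // ENTRY
    /* ... heap bookkeeping runs with interrupts masked ... */
    xt_wsr_ps(_saved_ps);                                    // EXIT
}
```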
@@ -594,12 +599,12 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
  */
 
 #ifdef UMM_INTEGRITY_CHECK
-   extern bool umm_integrity_check( void );
-#  define INTEGRITY_CHECK() umm_integrity_check()
-   extern void umm_corruption(void);
-#  define UMM_HEAP_CORRUPTION_CB() DBGLOG_FUNCTION( "Heap Corruption!" )
+extern bool umm_integrity_check(void);
+#define INTEGRITY_CHECK() umm_integrity_check()
+extern void umm_corruption(void);
+#define UMM_HEAP_CORRUPTION_CB() DBGLOG_FUNCTION("Heap Corruption!")
 #else
-#  define INTEGRITY_CHECK() (1)
+#define INTEGRITY_CHECK() (1)
 #endif
 
 /////////////////////////////////////////////////
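With UMM_INTEGRITY_CHECK off, INTEGRITY_CHECK() above compiles to the constant 1, so a guarded call site optimizes away. A sketch of an explicit self-test, assuming the option is enabled:
```
// Hedged sketch: explicit heap self-test using the hooks above
// (meaningful only when UMM_INTEGRITY_CHECK is defined).
void heap_selftest(void) {
    if (!INTEGRITY_CHECK()) {
        UMM_HEAP_CORRUPTION_CB();  // prints "Heap Corruption!"
    }
}
```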
@@ -669,33 +674,33 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
 #define UMM_POISONED_BLOCK_LEN_TYPE uint32_t
 
 #if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
-   extern void *umm_poison_malloc( size_t size );
-   extern void *umm_poison_calloc( size_t num, size_t size );
-   extern void *umm_poison_realloc( void *ptr, size_t size );
-   extern void  umm_poison_free( void *ptr );
-   extern bool  umm_poison_check( void );
-   // Local Additions to better report location in code of the caller.
-   void *umm_poison_realloc_fl( void *ptr, size_t size, const char* file, int line );
-   void  umm_poison_free_fl( void *ptr, const char* file, int line );
-   #if defined(UMM_POISON_CHECK_LITE)
-   /*
+extern void *umm_poison_malloc(size_t size);
+extern void *umm_poison_calloc(size_t num, size_t size);
+extern void *umm_poison_realloc(void *ptr, size_t size);
+extern void  umm_poison_free(void *ptr);
+extern bool  umm_poison_check(void);
+// Local additions to better report the caller's location in the code.
+void *umm_poison_realloc_fl(void *ptr, size_t size, const char *file, int line);
+void  umm_poison_free_fl(void *ptr, const char *file, int line);
+#if defined(UMM_POISON_CHECK_LITE)
+/*
     * We can safely do individual poison checks at free and realloc and stay
     * under 10us or close.
     */
-   #  define POISON_CHECK() 1
-   #  define POISON_CHECK_NEIGHBORS(c) \
-     do {\
-       if(!check_poison_neighbors(_context, c)) \
-         panic();\
-     } while(false)
-   #else
-   /* Not normally enabled. A full heap poison check may exceed 10us. */
-   #  define POISON_CHECK() umm_poison_check()
-   #  define POISON_CHECK_NEIGHBORS(c) do{}while(false)
-   #endif
+   #define POISON_CHECK() 1
+   #define POISON_CHECK_NEIGHBORS(c) \
+    do { \
+        if (!check_poison_neighbors(_context, c)) \
+        panic(); \
+    } while (false)
+#else
+/* Not normally enabled. A full heap poison check may exceed 10us. */
+   #define POISON_CHECK() umm_poison_check()
+   #define POISON_CHECK_NEIGHBORS(c) do {} while (false)
+#endif
 #else
-#  define POISON_CHECK() 1
-#  define POISON_CHECK_NEIGHBORS(c) do{}while(false)
+#define POISON_CHECK() 1
+#define POISON_CHECK_NEIGHBORS(c) do {} while (false)
 #endif
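The *_fl variants declared above exist so a wrapper can forward the original call site into the poison report. A hypothetical convenience macro (not part of this PR):
```
// Hypothetical helper: forward the caller's file/line to the
// location-aware poison free declared above.
#define POISON_FREE_HERE(p) umm_poison_free_fl((p), __FILE__, __LINE__)
// Usage: POISON_FREE_HERE(buf);
```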
 
 
@@ -705,13 +710,13 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
  * that can actually be allocated.
  */
 #define UMM_OVERHEAD_ADJUST ( \
-  umm_block_size()/2 + \
-  UMM_POISON_SIZE_BEFORE + \
-  UMM_POISON_SIZE_AFTER + \
-  sizeof(UMM_POISONED_BLOCK_LEN_TYPE))
+    umm_block_size() / 2 + \
+    UMM_POISON_SIZE_BEFORE + \
+    UMM_POISON_SIZE_AFTER + \
+    sizeof(UMM_POISONED_BLOCK_LEN_TYPE))
 
 #else
-#define UMM_OVERHEAD_ADJUST  (umm_block_size()/2)
+#define UMM_OVERHEAD_ADJUST  (umm_block_size() / 2)
 #endif
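A worked instance of UMM_OVERHEAD_ADJUST above, assuming values typical for this port (8-byte umm_block, 4-byte poison zones, 32-bit length field):
```
// Hedged arithmetic, not a definitive configuration:
//   umm_block_size() / 2                 = 8 / 2 = 4
//   UMM_POISON_SIZE_BEFORE               = 4
//   UMM_POISON_SIZE_AFTER                = 4
//   sizeof(UMM_POISONED_BLOCK_LEN_TYPE)  = sizeof(uint32_t) = 4
//   UMM_OVERHEAD_ADJUST                  = 16 bytes per allocation
```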
 
 
@@ -722,9 +727,9 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
 #if defined(DEBUG_ESP_PORT) || defined(DEBUG_ESP_OOM) || \
     defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) || \
     defined(UMM_INTEGRITY_CHECK)
-#define DBGLOG_FUNCTION(fmt, ...) ets_uart_printf(fmt, ##__VA_ARGS__)
+#define DBGLOG_FUNCTION(fmt, ...) ets_uart_printf(fmt,##__VA_ARGS__)
 #else
-#define DBGLOG_FUNCTION(fmt, ...)   do { (void)fmt; } while(false)
+#define DBGLOG_FUNCTION(fmt, ...)   do { (void)fmt; } while (false)
 #endif
 
 /////////////////////////////////////////////////
@@ -739,19 +744,19 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
 
 #if defined(UMM_CRITICAL_METRICS)
 struct UMM_TIME_STATS_t {
-  UMM_TIME_STAT id_malloc;
-  UMM_TIME_STAT id_realloc;
-  UMM_TIME_STAT id_free;
-#ifdef UMM_INFO
-  UMM_TIME_STAT id_info;
-#endif
-#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
-  UMM_TIME_STAT id_poison;
-#endif
-#ifdef UMM_INTEGRITY_CHECK
-  UMM_TIME_STAT id_integrity;
-#endif
-  UMM_TIME_STAT id_no_tag;
+    UMM_TIME_STAT id_malloc;
+    UMM_TIME_STAT id_realloc;
+    UMM_TIME_STAT id_free;
+    #ifdef UMM_INFO
+    UMM_TIME_STAT id_info;
+    #endif
+    #if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
+    UMM_TIME_STAT id_poison;
+    #endif
+    #ifdef UMM_INTEGRITY_CHECK
+    UMM_TIME_STAT id_integrity;
+    #endif
+    UMM_TIME_STAT id_no_tag;
 };
 #endif
 /////////////////////////////////////////////////
@@ -764,17 +769,17 @@ struct UMM_TIME_STATS_t {
 
 #define umm_zalloc(s) umm_calloc(1,s)
 
-void* malloc_loc (size_t s, const char* file, int line);
-void* calloc_loc (size_t n, size_t s, const char* file, int line);
-void* realloc_loc (void* p, size_t s, const char* file, int line);
+void *malloc_loc(size_t s, const char *file, int line);
+void *calloc_loc(size_t n, size_t s, const char *file, int line);
+void *realloc_loc(void *p, size_t s, const char *file, int line);
 // *alloc are macro calling *alloc_loc calling+checking umm_*alloc()
 // they are defined at the bottom of this file
 
 /////////////////////////////////////////////////
 
 #elif defined(UMM_POISON_CHECK)
-void* realloc_loc (void* p, size_t s, const char* file, int line);
-void  free_loc (void* p, const char* file, int line);
+void *realloc_loc(void *p, size_t s, const char *file, int line);
+void  free_loc(void *p, const char *file, int line);
 #else // !defined(ESP_DEBUG_OOM)
 #endif
 
@@ -797,11 +802,11 @@ extern "C" {
 #include <pgmspace.h>
 // Reuse pvPort* calls, since they already support passing location information.
 // Specifically the debug version (heap_...) that does not force DRAM heap.
-void* IRAM_ATTR heap_pvPortMalloc(size_t size, const char* file, int line);
-void* IRAM_ATTR heap_pvPortCalloc(size_t count, size_t size, const char* file, int line);
-void* IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char* file, int line);
-void* IRAM_ATTR heap_pvPortZalloc(size_t size, const char* file, int line);
-void  IRAM_ATTR heap_vPortFree(void *ptr, const char* file, int line);
+void *IRAM_ATTR heap_pvPortMalloc(size_t size, const char *file, int line);
+void *IRAM_ATTR heap_pvPortCalloc(size_t count, size_t size, const char *file, int line);
+void *IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char *file, int line);
+void *IRAM_ATTR heap_pvPortZalloc(size_t size, const char *file, int line);
+void IRAM_ATTR heap_vPortFree(void *ptr, const char *file, int line);
 
 #define malloc(s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortMalloc(s, mem_debug_file, __LINE__); })
 #define calloc(n,s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortCalloc(n, s, mem_debug_file, __LINE__); })
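The malloc()/calloc() wrappers above rely on a GNU statement expression so each call site emits its file name once, into flash (PROGMEM) rather than DRAM. Expanded at a hypothetical call site foo.cpp:42, malloc(16) becomes roughly:
```
// Hedged expansion sketch; "foo.cpp" and 42 stand in for __FILE__
// and __LINE__ at the call site.
void *p = ({
    static const char mem_debug_file[] PROGMEM STORE_ATTR = "foo.cpp";
    heap_pvPortMalloc(16, mem_debug_file, 42);
});
```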
@@ -815,11 +820,11 @@ void  IRAM_ATTR heap_vPortFree(void *ptr, const char* file, int line);
 
 #elif defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
 #include <pgmspace.h>
-void* IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char* file, int line);
+void *IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char *file, int line);
 #define realloc(p,s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortRealloc(p, s, mem_debug_file, __LINE__); })
 
-void  IRAM_ATTR heap_vPortFree(void *ptr, const char* file, int line);
-//C - to be discussed
+void IRAM_ATTR heap_vPortFree(void *ptr, const char *file, int line);
+// C - to be discussed
 /*
   Problem, I would like to report the file and line number with the umm poison
   event as close as possible to the event. The #define method works for malloc,
diff --git a/cores/esp8266/umm_malloc/umm_poison.c b/cores/esp8266/umm_malloc/umm_poison.c
index 01122c1a73..1d5e7651de 100644
--- a/cores/esp8266/umm_malloc/umm_poison.c
+++ b/cores/esp8266/umm_malloc/umm_poison.c
@@ -13,27 +13,27 @@
  * If `s` is 0, returns 0.
  */
 static size_t poison_size(size_t s) {
-    return(s ? (UMM_POISON_SIZE_BEFORE +
-                sizeof(UMM_POISONED_BLOCK_LEN_TYPE) +
-                UMM_POISON_SIZE_AFTER)
-             : 0);
+    return s ? (UMM_POISON_SIZE_BEFORE +
+        sizeof(UMM_POISONED_BLOCK_LEN_TYPE) +
+        UMM_POISON_SIZE_AFTER)
+             : 0;
 }
 
 /*
  * Print memory contents starting from given `ptr`
  */
-static void dump_mem ( const void *vptr, size_t len ) {
-  const uint8_t *ptr = (const uint8_t *)vptr;
-  while (len--) {
-    DBGLOG_ERROR(" 0x%.2x", (unsigned int)(*ptr++));
-  }
+static void dump_mem(const void *vptr, size_t len) {
+    const uint8_t *ptr = (const uint8_t *)vptr;
+    while (len--) {
+        DBGLOG_ERROR(" 0x%.2x", (unsigned int)(*ptr++));
+    }
 }
 
 /*
  * Put poison data at given `ptr` and `poison_size`
  */
-static void put_poison( void *ptr, size_t poison_size ) {
-  memset(ptr, POISON_BYTE, poison_size);
+static void put_poison(void *ptr, size_t poison_size) {
+    memset(ptr, POISON_BYTE, poison_size);
 }
 
 /*
@@ -43,56 +43,56 @@ static void put_poison( void *ptr, size_t poison_size ) {
  * If poison is there, returns 1.
  * Otherwise, prints the appropriate message, and returns 0.
  */
-static bool check_poison( const void *ptr, size_t poison_size,
+static bool check_poison(const void *ptr, size_t poison_size,
     const char *where) {
-  size_t i;
-  bool ok = true;
-
-  for (i = 0; i < poison_size; i++) {
-    if (((const uint8_t *)ptr)[i] != POISON_BYTE) {
-      ok = false;
-      break;
+    size_t i;
+    bool ok = true;
+
+    for (i = 0; i < poison_size; i++) {
+        if (((const uint8_t *)ptr)[i] != POISON_BYTE) {
+            ok = false;
+            break;
+        }
     }
-  }
 
-  if (!ok) {
-    DBGLOG_ERROR( "No poison %s block at: 0x%lx, actual data:", where, (unsigned long)ptr);
-    dump_mem(ptr, poison_size);
-    DBGLOG_ERROR( "\n" );
-  }
+    if (!ok) {
+        DBGLOG_ERROR("No poison %s block at: 0x%lx, actual data:", where, (unsigned long)ptr);
+        dump_mem(ptr, poison_size);
+        DBGLOG_ERROR("\n");
+    }
 
-  return ok;
+    return ok;
 }
 
 /*
  * Check if a block is properly poisoned. Must be called only for non-free
  * blocks.
  */
-static bool check_poison_block( umm_block *pblock ) {
-  bool ok = true;
-
-  if (pblock->header.used.next & UMM_FREELIST_MASK) {
-    DBGLOG_ERROR( "check_poison_block is called for free block 0x%lx\n", (unsigned long)pblock);
-  } else {
-    /* the block is used; let's check poison */
-    unsigned char *pc = (unsigned char *)pblock->body.data;
-    unsigned char *pc_cur;
-
-    pc_cur = pc + sizeof(UMM_POISONED_BLOCK_LEN_TYPE);
-    if (!check_poison(pc_cur, UMM_POISON_SIZE_BEFORE, "before")) {
-      ok = false;
-      goto clean;
+static bool check_poison_block(umm_block *pblock) {
+    bool ok = true;
+
+    if (pblock->header.used.next & UMM_FREELIST_MASK) {
+        DBGLOG_ERROR("check_poison_block is called for free block 0x%lx\n", (unsigned long)pblock);
+    } else {
+        /* the block is used; let's check poison */
+        unsigned char *pc = (unsigned char *)pblock->body.data;
+        unsigned char *pc_cur;
+
+        pc_cur = pc + sizeof(UMM_POISONED_BLOCK_LEN_TYPE);
+        if (!check_poison(pc_cur, UMM_POISON_SIZE_BEFORE, "before")) {
+            ok = false;
+            goto clean;
+        }
+
+        pc_cur = pc + *((UMM_POISONED_BLOCK_LEN_TYPE *)pc) - UMM_POISON_SIZE_AFTER;
+        if (!check_poison(pc_cur, UMM_POISON_SIZE_AFTER, "after")) {
+            ok = false;
+            goto clean;
+        }
     }
 
-    pc_cur = pc + *((UMM_POISONED_BLOCK_LEN_TYPE *)pc) - UMM_POISON_SIZE_AFTER;
-    if (!check_poison(pc_cur, UMM_POISON_SIZE_AFTER, "after")) {
-      ok = false;
-      goto clean;
-    }
-  }
-
 clean:
-  return ok;
+    return ok;
 }
 
 /*
@@ -102,25 +102,25 @@ static bool check_poison_block( umm_block *pblock ) {
  *
  * `size_w_poison` is a size of the whole block, including a poison.
  */
-static void *get_poisoned( void *vptr, size_t size_w_poison ) {
-  unsigned char *ptr = (unsigned char *)vptr;
+static void *get_poisoned(void *vptr, size_t size_w_poison) {
+    unsigned char *ptr = (unsigned char *)vptr;
 
-  if (size_w_poison != 0 && ptr != NULL) {
+    if (size_w_poison != 0 && ptr != NULL) {
 
-    /* Poison beginning and the end of the allocated chunk */
-    put_poison(ptr + sizeof(UMM_POISONED_BLOCK_LEN_TYPE),
-        UMM_POISON_SIZE_BEFORE);
-    put_poison(ptr + size_w_poison - UMM_POISON_SIZE_AFTER,
-        UMM_POISON_SIZE_AFTER);
+        /* Poison beginning and the end of the allocated chunk */
+        put_poison(ptr + sizeof(UMM_POISONED_BLOCK_LEN_TYPE),
+            UMM_POISON_SIZE_BEFORE);
+        put_poison(ptr + size_w_poison - UMM_POISON_SIZE_AFTER,
+            UMM_POISON_SIZE_AFTER);
 
-    /* Put exact length of the user's chunk of memory */
-    *(UMM_POISONED_BLOCK_LEN_TYPE *)ptr = (UMM_POISONED_BLOCK_LEN_TYPE)size_w_poison;
+        /* Put exact length of the user's chunk of memory */
+        *(UMM_POISONED_BLOCK_LEN_TYPE *)ptr = (UMM_POISONED_BLOCK_LEN_TYPE)size_w_poison;
 
-    /* Return pointer at the first non-poisoned byte */
-    ptr += sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE;
-  }
+        /* Return pointer at the first non-poisoned byte */
+        ptr += sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE;
+    }
 
-  return (void *)ptr;
+    return (void *)ptr;
 }
 
 /*
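get_poisoned() above lays the allocation out as a length word, a front red zone, the user bytes, and a rear red zone, returning the pointer just past the front zone. A sketch mirroring that pointer arithmetic:
```
// Hedged sketch of the layout produced by get_poisoned():
//   [len][poison before][user data][poison after]
// The caller receives a pointer just past the front poison zone.
static void *raw_to_user(void *raw) {
    return (uint8_t *)raw
           + sizeof(UMM_POISONED_BLOCK_LEN_TYPE)
           + UMM_POISON_SIZE_BEFORE;
}
```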
@@ -129,84 +129,85 @@ static void *get_poisoned( void *vptr, size_t size_w_poison ) {
  *
  * Returns unpoisoned pointer, i.e. actual pointer to the allocated memory.
  */
-static void *get_unpoisoned( void *vptr ) {
-   uintptr_t ptr = (uintptr_t)vptr;
+static void *get_unpoisoned(void *vptr) {
+    uintptr_t ptr = (uintptr_t)vptr;
 
-  if (ptr != 0) {
-    uint16_t c;
+    if (ptr != 0) {
+        uint16_t c;
 
-    ptr -= (sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE);
+        ptr -= (sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE);
 
-    umm_heap_context_t *_context = umm_get_ptr_context( vptr );
-    if (NULL == _context) {
-      panic();
-      return NULL;
-    }
-    /* Figure out which block we're in. Note the use of truncated division... */
-    c = (ptr - (uintptr_t)(&(_context->heap[0])))/sizeof(umm_block);
+        umm_heap_context_t *_context = umm_get_ptr_context(vptr);
+        if (NULL == _context) {
+            panic();
+            return NULL;
+        }
+        /* Figure out which block we're in. Note the use of truncated division... */
+        c = (ptr - (uintptr_t)(&(_context->heap[0]))) / sizeof(umm_block);
 
-    check_poison_block(&UMM_BLOCK(c));
-  }
+        check_poison_block(&UMM_BLOCK(c));
+    }
 
-  return (void *)ptr;
+    return (void *)ptr;
 }
 
 /* }}} */
 
 /* ------------------------------------------------------------------------ */
 
-void *umm_poison_malloc( size_t size ) {
-  void *ret;
+void *umm_poison_malloc(size_t size) {
+    void *ret;
 
-  size += poison_size(size);
+    size += poison_size(size);
 
-  ret = umm_malloc( size );
+    ret = umm_malloc(size);
 
-  ret = get_poisoned(ret, size);
+    ret = get_poisoned(ret, size);
 
-  return ret;
+    return ret;
 }
 
 /* ------------------------------------------------------------------------ */
 
-void *umm_poison_calloc( size_t num, size_t item_size ) {
-  void *ret;
-  size_t size = item_size * num;
+void *umm_poison_calloc(size_t num, size_t item_size) {
+    void *ret;
+    size_t size = item_size * num;
 
-  size += poison_size(size);
+    size += poison_size(size);
 
-  ret = umm_malloc(size);
+    ret = umm_malloc(size);
 
-  if (NULL != ret)
-      memset(ret, 0x00, size);
+    if (NULL != ret) {
+        memset(ret, 0x00, size);
+    }
 
-  ret = get_poisoned(ret, size);
+    ret = get_poisoned(ret, size);
 
-  return ret;
+    return ret;
 }
 
 /* ------------------------------------------------------------------------ */
 
-void *umm_poison_realloc( void *ptr, size_t size ) {
-  void *ret;
+void *umm_poison_realloc(void *ptr, size_t size) {
+    void *ret;
 
-  ptr = get_unpoisoned(ptr);
+    ptr = get_unpoisoned(ptr);
 
-  size += poison_size(size);
-  ret = umm_realloc( ptr, size );
+    size += poison_size(size);
+    ret = umm_realloc(ptr, size);
 
-  ret = get_poisoned(ret, size);
+    ret = get_poisoned(ret, size);
 
-  return ret;
+    return ret;
 }
 
 /* ------------------------------------------------------------------------ */
 
-void umm_poison_free( void *ptr ) {
+void umm_poison_free(void *ptr) {
 
-  ptr = get_unpoisoned(ptr);
+    ptr = get_unpoisoned(ptr);
 
-  umm_free( ptr );
+    umm_free(ptr);
 }
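One behavior worth noting in umm_poison_calloc() above: the num * item_size product is computed without an overflow check before the poison overhead is added. A hedged sketch of a guarded front end (hypothetical, not part of this PR):
```
// Hypothetical overflow-guarded wrapper around umm_poison_calloc().
void *poison_calloc_checked(size_t num, size_t item_size) {
    if (item_size != 0 && num > (size_t)-1 / item_size) {
        return NULL;  // num * item_size would wrap around
    }
    return umm_poison_calloc(num, item_size);
}
```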
 
 /*
@@ -215,32 +216,32 @@ void umm_poison_free( void *ptr ) {
  */
 
 bool umm_poison_check(void) {
-  UMM_CRITICAL_DECL(id_poison);
-  bool ok = true;
-  uint16_t cur;
+    UMM_CRITICAL_DECL(id_poison);
+    bool ok = true;
+    uint16_t cur;
 
-  UMM_INIT_HEAP;
+    UMM_INIT_HEAP;
 
-  UMM_CRITICAL_ENTRY(id_poison);
-  umm_heap_context_t *_context = umm_get_current_heap();
+    UMM_CRITICAL_ENTRY(id_poison);
+    umm_heap_context_t *_context = umm_get_current_heap();
 
-  /* Now iterate through the blocks list */
-  cur = UMM_NBLOCK(0) & UMM_BLOCKNO_MASK;
+    /* Now iterate through the blocks list */
+    cur = UMM_NBLOCK(0) & UMM_BLOCKNO_MASK;
 
-  while( UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK ) {
-    if ( !(UMM_NBLOCK(cur) & UMM_FREELIST_MASK) ) {
-      /* This is a used block (not free), so, check its poison */
-      ok = check_poison_block(&UMM_BLOCK(cur));
-      if (!ok){
-        break;
-      }
-    }
+    while (UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK) {
+        if (!(UMM_NBLOCK(cur) & UMM_FREELIST_MASK)) {
+            /* This is a used block (not free), so, check its poison */
+            ok = check_poison_block(&UMM_BLOCK(cur));
+            if (!ok) {
+                break;
+            }
+        }
 
-    cur = UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK;
-  }
-  UMM_CRITICAL_EXIT(id_poison);
+        cur = UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK;
+    }
+    UMM_CRITICAL_EXIT(id_poison);
 
-  return ok;
+    return ok;
 }
 
 /* ------------------------------------------------------------------------ */
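umm_poison_check() above walks every block with the id_poison critical section held, so interrupts stay masked for the whole sweep; it belongs in cooperative code, not in an ISR. A usage sketch from the Arduino loop():
```
// Hedged sketch: periodic full-heap poison sweep from loop().
void loop() {
    static uint32_t last_sweep = 0;
    if (millis() - last_sweep > 5000) {  // roughly every 5 s
        last_sweep = millis();
        if (!umm_poison_check()) {
            panic();  // a red zone was overwritten
        }
    }
}
```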