Reformatted Kernel files in Memory subdirectory.

Reformatted BlkPool.CC, HeapCtrl.CC, MAllocFree.CC, MemPag.CC, MemPhysical.CC, PageTables.CC.
This commit is contained in:
TomAwezome 2020-09-14 21:55:57 -04:00
parent ec3aa83b55
commit 7e0ce60071
8 changed files with 697 additions and 601 deletions

View File

@ -136,4 +136,14 @@ StartOS.CC
FileSysFAT.CC FileSysFAT.CC
FileSysRedSea.CC FileSysRedSea.CC
MakeBlkDev.CC MakeBlkDev.CC
Memory/
BlkPool.CC
HeapCtrl.CC
MAllocFree.CC
MakeMemory.CC
MemPag.CC
MemPhysical.CC
PageTables.CC
$FG,7$-Tom$FG,0$ $FG,7$-Tom$FG,0$

View File

@ -1,73 +1,82 @@
U0 BlkPoolAdd(CBlkPool *bp,CMemBlk *m,I64 pags) U0 BlkPoolAdd(CBlkPool *bp, CMemBlk *m, I64 pags)
{//Add mem to BlkPool. {//Add mem to BlkPool.
if (sys_mem_init_flag) if (sys_mem_init_flag)
MemSet(m,sys_mem_init_val,pags*MEM_PAG_SIZE); MemSet(m, sys_mem_init_val, pags * MEM_PAG_SIZE);
PUSHFD PUSHFD
CLI CLI
while (LBts(&bp->locked_flags,BPlf_LOCKED)) while (LBts(&bp->locked_flags, BPlf_LOCKED))
PAUSE PAUSE
m->next=bp->mem_free_list; m->next = bp->mem_free_list;
m->pags=pags; m->pags = pags;
m->mb_signature=MBS_UNUSED_SIGNATURE_VAL; m->mb_signature = MBS_UNUSED_SIGNATURE_VAL;
bp->alloced_u8s+=pags<<MEM_PAG_BITS; bp->alloced_u8s += pags << MEM_PAG_BITS;
bp->mem_free_list=m; bp->mem_free_list = m;
LBtr(&bp->locked_flags,BPlf_LOCKED); LBtr(&bp->locked_flags, BPlf_LOCKED);
POPFD POPFD
} }
U0 BlkPoolInit(CBlkPool *bp,I64 pags) U0 BlkPoolInit(CBlkPool *bp, I64 pags)
{//Make mem chunk into a BlkPool. {//Make mem chunk into a BlkPool.
I64 num; I64 num;
CMemBlk *m; CMemBlk *m;
MemSet(bp,0,sizeof(CBlkPool));
m=(bp(U8 *)+sizeof(CBlkPool)+MEM_PAG_SIZE-1)&~(MEM_PAG_SIZE-1); MemSet(bp, 0, sizeof(CBlkPool));
num=(bp(U8 *)+pags<<MEM_PAG_BITS-m(U8 *))>>MEM_PAG_BITS; m = (bp(U8 *) + sizeof(CBlkPool) + MEM_PAG_SIZE - 1) & ~(MEM_PAG_SIZE - 1);
bp->alloced_u8s=(pags-num)<<MEM_PAG_BITS; //Compensate before num added. num = (bp(U8 *) + pags << MEM_PAG_BITS - m(U8 *)) >> MEM_PAG_BITS;
BlkPoolAdd(bp,m,num); bp->alloced_u8s = (pags-num) << MEM_PAG_BITS; //Compensate before num added.
BlkPoolAdd(bp, m, num);
} }
U0 BlkPoolsInit() U0 BlkPoolsInit()
{ {
I64 i,total,lo,hi,code_heap_limit; I64 i, total, lo, hi, code_heap_limit;
CMemE820 *m20=MEM_E820; CMemE820 *m20 = MEM_E820;
Bool first=TRUE; Bool first = TRUE;
total=MemBIOSTotal; total = MemBIOSTotal;
if (total<=0x80000000) if (total <= 0x80000000)
code_heap_limit=total; code_heap_limit = total;
else if (total<=0x100000000) else if (total <= 0x100000000)
code_heap_limit=total/4; code_heap_limit = total / 4;
else else
code_heap_limit=0x80000000; code_heap_limit = 0x80000000;
i=code_heap_limit-SYS_16MEG_AREA_LIMIT; //See $LK,"RLf_16MEG_SYS_CODE_BP",A="FF:::/Kernel/Memory/PageTables.CC,RLf_16MEG_SYS_CODE_BP"$ i = code_heap_limit - SYS_16MEG_AREA_LIMIT; //See $LK,"RLf_16MEG_SYS_CODE_BP",A="FF:::/Kernel/Memory/PageTables.CC,RLf_16MEG_SYS_CODE_BP"$
BlkPoolAdd(sys_code_bp,SYS_16MEG_AREA_LIMIT,i>>MEM_PAG_BITS); BlkPoolAdd(sys_code_bp, SYS_16MEG_AREA_LIMIT, i >> MEM_PAG_BITS);
mem_heap_limit=i+SYS_16MEG_AREA_LIMIT-1; mem_heap_limit = i + SYS_16MEG_AREA_LIMIT - 1;
if (code_heap_limit<total) { if (code_heap_limit<total)
while (m20->type) { {
if (m20->type==MEM_E820t_USABLE) { while (m20->type)
lo=m20->base; {
hi=m20->base+m20->len; if (m20->type == MEM_E820t_USABLE)
if (lo<code_heap_limit) { {
if (hi>code_heap_limit) lo = m20->base;
lo=code_heap_limit; hi = m20->base + m20->len;
if (lo<code_heap_limit)
{
if (hi > code_heap_limit)
lo = code_heap_limit;
else else
hi=lo; //cancel hi = lo; //cancel
} }
if (code_heap_limit<=lo<hi) { if (code_heap_limit <= lo < hi)
if (first) { {
BlkPoolInit(lo,(hi-lo)>>MEM_PAG_BITS); if (first)
sys_data_bp=lo; {
Fs->data_heap=HeapCtrlInit(,Fs,sys_data_bp); BlkPoolInit(lo, (hi - lo) >> MEM_PAG_BITS);
first=FALSE; sys_data_bp = lo;
} else Fs->data_heap = HeapCtrlInit(, Fs, sys_data_bp);
BlkPoolAdd(sys_data_bp,lo,(hi-lo)>>MEM_PAG_BITS); first = FALSE;
}
else
BlkPoolAdd(sys_data_bp, lo, (hi - lo) >> MEM_PAG_BITS);
} }
} }
m20++; m20++;
} }
} }
LBts(&sys_run_level,RLf_FULL_HEAPS); LBts(&sys_run_level, RLf_FULL_HEAPS);
} }

View File

@ -1,33 +1,37 @@
CHeapCtrl *HeapCtrlInit(CHeapCtrl *hc=NULL,CTask *task=NULL,CBlkPool *bp) CHeapCtrl *HeapCtrlInit(CHeapCtrl *hc=NULL, CTask *task=NULL, CBlkPool *bp)
{//See $LK,"HeapLog",A="MN:HeapLog"$() for an example. {//See $LK,"HeapLog",A="MN:HeapLog"$() for an example.
//Duplicated for $LK,"Zenith Task",A="FF:::/Kernel/KStart64.CC,CHeapCtrl.bp"$. //Duplicated for $LK,"Zenith Task",A="FF:::/Kernel/KStart64.CC,CHeapCtrl.bp"$.
if (!hc) if (!hc)
hc=ZCAlloc(sizeof(CHeapCtrl)); hc = ZCAlloc(sizeof(CHeapCtrl));
hc->hc_signature=HEAP_CTRL_SIGNATURE_VAL; hc->hc_signature = HEAP_CTRL_SIGNATURE_VAL;
hc->mem_task=task; hc->mem_task = task;
hc->bp=bp; hc->bp = bp;
QueueInit(&hc->next_mem_blk); QueueInit(&hc->next_mem_blk);
hc->last_mergable=NULL; hc->last_mergable = NULL;
hc->next_um=hc->last_um=(&hc->next_um)(U8 *)-offset(CMemUsed.next); hc->next_um = hc->last_um = (&hc->next_um)(U8 *) - offset(CMemUsed.next);
return hc; return hc;
} }
U0 HeapCtrlDel(CHeapCtrl *hc) U0 HeapCtrlDel(CHeapCtrl *hc)
{//Free all blks alloced to a HeapCtrl. {//Free all blks alloced to a HeapCtrl.
CMemBlk *m,*m1; CMemBlk *m, *m1;
if (hc) {
if (hc)
{
PUSHFD PUSHFD
CLI CLI
while (LBts(&hc->locked_flags,HClf_LOCKED)) while (LBts(&hc->locked_flags, HClf_LOCKED))
PAUSE PAUSE
m=hc->next_mem_blk; m = hc->next_mem_blk;
while (m!=&hc->next_mem_blk) { while (m != &hc->next_mem_blk)
m1=m->next; {
MemPagTaskFree(m,hc); m1 = m->next;
m=m1; MemPagTaskFree(m, hc);
m = m1;
} }
LBtr(&hc->locked_flags,HClf_LOCKED); LBtr(&hc->locked_flags, HClf_LOCKED);
POPFD POPFD
Free(hc); Free(hc);
} }
} }

View File

@ -4,213 +4,213 @@ asm {
_MALLOC:: _MALLOC::
// Throws 'OutMem' // Throws 'OutMem'
PUSH RBP PUSH RBP
MOV RBP,RSP MOV RBP, RSP
PUSH RSI PUSH RSI
PUSH RDI PUSH RDI
XOR RBX,RBX XOR RBX, RBX
MOV RDX,U64 SF_ARG2[RBP] MOV RDX, U64 SF_ARG2[RBP]
TEST RDX,RDX TEST RDX, RDX
JNZ @@05 JNZ @@05
MOV RDX,U64 FS:CTask.addr[RBX] MOV RDX, U64 FS:CTask.addr[RBX]
@@05: CMP U32 CTask.task_signature[RDX],TASK_SIGNATURE_VAL @@05: CMP U32 CTask.task_signature[RDX], TASK_SIGNATURE_VAL
#assert CTask.task_signature==CHeapCtrl.hc_signature //location signature same #assert CTask.task_signature == CHeapCtrl.hc_signature //location signature same
JNE @@10 JNE @@10
MOV RDX,U64 CTask.data_heap[RDX] MOV RDX, U64 CTask.data_heap[RDX]
@@10: CMP U32 CHeapCtrl.hc_signature[RDX],HEAP_CTRL_SIGNATURE_VAL @@10: CMP U32 CHeapCtrl.hc_signature[RDX], HEAP_CTRL_SIGNATURE_VAL
JE @@15 JE @@15
PUSH RDX PUSH RDX
CALL &SysBadMAlloc CALL &SysBadMAlloc
JMP I32 _SYS_HLT JMP I32 _SYS_HLT
@@15: MOV RAX,U64 SF_ARG1[RBP] @@15: MOV RAX, U64 SF_ARG1[RBP]
PUSHFD PUSHFD
ADD RAX,CMemUsed.start+7 //round-up to I64 ADD RAX, CMemUsed.start + 7 //round-up to I64
AND AL,0xF8 AND AL, 0xF8
#assert CMemUsed.start>=sizeof(CMemUnused) #assert CMemUsed.start >= sizeof(CMemUnused)
CMP RAX,CMemUsed.start CMP RAX, CMemUsed.start
JAE @@20 JAE @@20
MOV RAX,CMemUsed.start MOV RAX, CMemUsed.start
@@20: @@20:
CLI CLI
@@25: LOCK @@25: LOCK
BTS U32 CHeapCtrl.locked_flags[RDX],HClf_LOCKED BTS U32 CHeapCtrl.locked_flags[RDX], HClf_LOCKED
PAUSE //don't know if this inst helps PAUSE //don't know if this inst helps
JC @@25 JC @@25
CMP RAX,MEM_HEAP_HASH_SIZE CMP RAX, MEM_HEAP_HASH_SIZE
JAE @@30 JAE @@30
MOV RSI,U64 CHeapCtrl.heap_hash[RAX+RDX] MOV RSI, U64 CHeapCtrl.heap_hash[RAX + RDX]
TEST RSI,RSI TEST RSI, RSI
JZ @@35 JZ @@35
MOV RCX,U64 CMemUnused.next[RSI] MOV RCX, U64 CMemUnused.next[RSI]
MOV U64 CHeapCtrl.heap_hash[RAX+RDX],RCX MOV U64 CHeapCtrl.heap_hash[RAX + RDX], RCX
JMP I32 MALLOC_ALMOST_DONE JMP I32 MALLOC_ALMOST_DONE
//Big allocation //Big allocation
@@30: ADD RAX,sizeof(CMemBlk)+MEM_PAG_SIZE-1 @@30: ADD RAX, sizeof(CMemBlk) + MEM_PAG_SIZE - 1
SHR RAX,MEM_PAG_BITS SHR RAX, MEM_PAG_BITS
PUSH RDX //preserve HeapCtrl PUSH RDX //preserve HeapCtrl
PUSH RDX PUSH RDX
PUSH RAX PUSH RAX
CALL &MemPagTaskAlloc CALL &MemPagTaskAlloc
POP RDX POP RDX
TEST RAX,RAX TEST RAX, RAX
JZ @@45 //Out of memory JZ @@45 //Out of memory
MOV RSI,RAX MOV RSI, RAX
MOV EAX,U32 CMemBlk.pags[RSI] MOV EAX, U32 CMemBlk.pags[RSI]
SHL RAX,MEM_PAG_BITS SHL RAX, MEM_PAG_BITS
SUB RAX,sizeof(CMemBlk) SUB RAX, sizeof(CMemBlk)
ADD RSI,sizeof(CMemBlk) ADD RSI, sizeof(CMemBlk)
JMP I32 MALLOC_ALMOST_DONE JMP I32 MALLOC_ALMOST_DONE
//Little allocation, chunk-off piece from free list chunks //Little allocation, chunk-off piece from free list chunks
@@35: LEA RSI,U64 CHeapCtrl.malloc_free_list-CMemUnused.next[RDX] @@35: LEA RSI, U64 CHeapCtrl.malloc_free_list - CMemUnused.next[RDX]
@@40: MOV RBX,RSI @@40: MOV RBX, RSI
MOV RSI,U64 CMemUnused.next[RBX] MOV RSI, U64 CMemUnused.next[RBX]
TEST RSI,RSI TEST RSI, RSI
JNZ I32 @@60 JNZ I32 @@60
PUSH RAX //-**** save byte size PUSH RAX //-**** save byte size
ADD RAX,16*MEM_PAG_SIZE-1 ADD RAX, 16 * MEM_PAG_SIZE - 1
SHR RAX,MEM_PAG_BITS SHR RAX, MEM_PAG_BITS
PUSH RDX //preserve HeapCtrl PUSH RDX //preserve HeapCtrl
PUSH RDX PUSH RDX
PUSH RAX PUSH RAX
CALL &MemPagTaskAlloc CALL &MemPagTaskAlloc
POP RDX POP RDX
TEST RAX,RAX TEST RAX, RAX
JNZ @@50 JNZ @@50
//Out of memory //Out of memory
@@45: LOCK @@45: LOCK
BTR U32 CHeapCtrl.locked_flags[RDX],HClf_LOCKED BTR U32 CHeapCtrl.locked_flags[RDX], HClf_LOCKED
POPFD POPFD
PUSH TRUE PUSH TRUE
MOV RAX,'OutMem' MOV RAX, 'OutMem'
PUSH RAX PUSH RAX
CALL I32 &throw CALL I32 &throw
JMP I32 MALLOC_FINAL_EXIT //Never gets here, hopefully. JMP I32 MALLOC_FINAL_EXIT //Never gets here, hopefully.
@@50: MOV RSI,RAX @@50: MOV RSI, RAX
MOV EAX,U32 CMemBlk.pags[RSI] MOV EAX, U32 CMemBlk.pags[RSI]
SHL RAX,MEM_PAG_BITS SHL RAX, MEM_PAG_BITS
//Can it be combined with last chunk? (Never Free these chunks.) //Can it be combined with last chunk? (Never Free these chunks.)
MOV RDI,U64 CHeapCtrl.last_mergable[RDX] MOV RDI, U64 CHeapCtrl.last_mergable[RDX]
LEA RBX,U64 [RSI+RAX] LEA RBX, U64 [RSI + RAX]
CMP RDI,RBX CMP RDI, RBX
JNE @@55 JNE @@55
PUSH RAX PUSH RAX
MOV EAX,U32 CMemBlk.pags[RDI] MOV EAX, U32 CMemBlk.pags[RDI]
ADD U32 CMemBlk.pags[RSI],EAX ADD U32 CMemBlk.pags[RSI],EAX
//QueueRemove //QueueRemove
MOV RAX,U64 CMemBlk.next[RDI] MOV RAX, U64 CMemBlk.next[RDI]
MOV RBX,U64 CMemBlk.last[RDI] MOV RBX, U64 CMemBlk.last[RDI]
MOV U64 CMemBlk.last[RAX],RBX MOV U64 CMemBlk.last[RAX], RBX
MOV U64 CMemBlk.next[RBX],RAX MOV U64 CMemBlk.next[RBX], RAX
POP RAX POP RAX
@@55: MOV U64 CHeapCtrl.last_mergable[RDX],RSI @@55: MOV U64 CHeapCtrl.last_mergable[RDX], RSI
LEA RSI,U64 sizeof(CMemBlk)[RSI] LEA RSI, U64 sizeof(CMemBlk)[RSI]
SUB RAX,sizeof(CMemBlk) SUB RAX, sizeof(CMemBlk)
LEA RBX,U64 CHeapCtrl.malloc_free_list-CMemUnused.next[RDX] LEA RBX, U64 CHeapCtrl.malloc_free_list - CMemUnused.next[RDX]
MOV RDI,U64 CMemUnused.next[RBX] MOV RDI, U64 CMemUnused.next[RBX]
MOV U64 CMemUnused.next[RSI],RDI MOV U64 CMemUnused.next[RSI], RDI
MOV U64 CMemUnused.size[RSI],RAX MOV U64 CMemUnused.size[RSI], RAX
MOV U64 CMemUnused.next[RBX],RSI MOV U64 CMemUnused.next[RBX], RSI
POP RAX //+**** POP RAX //+****
JMP @@70 JMP @@70
@@60: CMP U64 CMemUnused.size[RSI],RAX @@60: CMP U64 CMemUnused.size[RSI], RAX
JB I32 @@40 JB I32 @@40
JNE @@70 JNE @@70
@@65: MOV RDI,U64 CMemUnused.next[RSI] @@65: MOV RDI, U64 CMemUnused.next[RSI]
MOV U64 CMemUnused.next[RBX],RDI MOV U64 CMemUnused.next[RBX], RDI
JMP MALLOC_ALMOST_DONE JMP MALLOC_ALMOST_DONE
@@70: SUB U64 CMemUnused.size[RSI],RAX //UPDATE FREE ENTRY @@70: SUB U64 CMemUnused.size[RSI], RAX //UPDATE FREE ENTRY
CMP U64 CMemUnused.size[RSI],sizeof(CMemUnused) CMP U64 CMemUnused.size[RSI], sizeof(CMemUnused)
JAE @@75 //take from top of block JAE @@75 //take from top of block
ADD U64 CMemUnused.size[RSI],RAX //doesn't fit, undo ADD U64 CMemUnused.size[RSI], RAX //doesn't fit, undo
JMP I32 @@40 JMP I32 @@40
@@75: ADD RSI,U64 CMemUnused.size[RSI] @@75: ADD RSI, U64 CMemUnused.size[RSI]
MALLOC_ALMOST_DONE: MALLOC_ALMOST_DONE:
//RSI=res-CMemUsed.size //RSI = res - CMemUsed.size
//RAX=size+CMemUsed.size //RAX = size + CMemUsed.size
//RDX=HeapCtrl //RDX = HeapCtrl
ADD U64 CHeapCtrl.used_u8s[RDX],RAX ADD U64 CHeapCtrl.used_u8s[RDX], RAX
#if _CONFIG_HEAP_DEBUG #if _CONFIG_HEAP_DEBUG
//QueueInsert //QueueInsert
MOV RDI,U64 CHeapCtrl.last_um[RDX] MOV RDI, U64 CHeapCtrl.last_um[RDX]
MOV U64 CMemUsed.next[RDI],RSI MOV U64 CMemUsed.next[RDI], RSI
MOV U64 CHeapCtrl.last_um[RDX],RSI MOV U64 CHeapCtrl.last_um[RDX], RSI
MOV U64 CMemUsed.last[RSI],RDI MOV U64 CMemUsed.last[RSI], RDI
LEA RDI,U64 CHeapCtrl.next_um-CMemUsed.next[RDX] LEA RDI, U64 CHeapCtrl.next_um - CMemUsed.next[RDX]
MOV U64 CMemUsed.next[RSI],RDI MOV U64 CMemUsed.next[RSI], RDI
//Caller1/Caller2 //Caller1/Caller2
PUSH RDX PUSH RDX
MOV RDX,U64 [MEM_HEAP_LIMIT] MOV RDX, U64 [MEM_HEAP_LIMIT]
MOV RDI,U64 SF_RIP[RBP] MOV RDI, U64 SF_RIP[RBP]
CMP RDI,RDX CMP RDI, RDX
JB @@80 JB @@80
XOR RDI,RDI XOR RDI, RDI
MOV U64 CMemUsed.caller1[RSI],RDI MOV U64 CMemUsed.caller1[RSI], RDI
JMP @@90 JMP @@90
@@80: MOV U64 CMemUsed.caller1[RSI],RDI @@80: MOV U64 CMemUsed.caller1[RSI], RDI
MOV RDI,U64 SF_RBP[RBP] MOV RDI, U64 SF_RBP[RBP]
CMP RDI,RDX CMP RDI, RDX
JB @@85 JB @@85
XOR RDI,RDI XOR RDI, RDI
JMP @@90 JMP @@90
@@85: MOV RDI,U64 SF_RIP[RDI] @@85: MOV RDI, U64 SF_RIP[RDI]
CMP RDI,RDX CMP RDI, RDX
JB @@90 JB @@90
XOR RDI,RDI XOR RDI, RDI
@@90: MOV U64 CMemUsed.caller2[RSI],RDI @@90: MOV U64 CMemUsed.caller2[RSI], RDI
POP RDX POP RDX
#endif #endif
LOCK LOCK
BTR U32 CHeapCtrl.locked_flags[RDX],HClf_LOCKED BTR U32 CHeapCtrl.locked_flags[RDX], HClf_LOCKED
POPFD POPFD
MOV U64 CMemUsed.size[RSI],RAX MOV U64 CMemUsed.size[RSI], RAX
MOV U64 CMemUsed.hc[RSI],RDX MOV U64 CMemUsed.hc[RSI], RDX
LEA RAX,U64 CMemUsed.start[RSI] LEA RAX, U64 CMemUsed.start[RSI]
TEST U8 [SYS_SEMAS+SEMA_HEAPLOG_ACTIVE*DEFAULT_CACHE_LINE_WIDTH],1 TEST U8 [SYS_SEMAS + SEMA_HEAPLOG_ACTIVE * DEFAULT_CACHE_LINE_WIDTH], 1
JZ @@105 JZ @@105
PUSH RAX PUSH RAX
PUSH RAX PUSH RAX
MOV RAX,U64 [SYS_EXTERN_TABLE] MOV RAX, U64 [SYS_EXTERN_TABLE]
MOV RAX,U64 EXT_HEAPLOG_MALLOC*8[RAX] MOV RAX, U64 EXT_HEAPLOG_MALLOC*8[RAX]
TEST RAX,RAX TEST RAX, RAX
JZ @@95 JZ @@95
CALL RAX CALL RAX
JMP @@100 JMP @@100
@@95: ADD RSP,8 @@95: ADD RSP, 8
@@100: POP RAX @@100: POP RAX
@@105: TEST U8 [SYS_HEAP_INIT_FLAG],1 @@105: TEST U8 [SYS_HEAP_INIT_FLAG], 1
JZ MALLOC_FINAL_EXIT JZ MALLOC_FINAL_EXIT
PUSH RAX PUSH RAX
MOV RCX,U64 CMemUsed.size-CMemUsed.start[RAX] MOV RCX, U64 CMemUsed.size - CMemUsed.start[RAX]
SUB RCX,CMemUsed.start SUB RCX, CMemUsed.start
MOV RDI,RAX MOV RDI, RAX
MOV AL,U8 [SYS_HEAP_INIT_VAL] MOV AL, U8 [SYS_HEAP_INIT_VAL]
REP_STOSB REP_STOSB
POP RAX POP RAX
@ -219,34 +219,35 @@ MALLOC_FINAL_EXIT:
POP RSI POP RSI
POP RBP POP RBP
RET1 16 RET1 16
//************************************ //************************************
_FREE:: _FREE::
//Be aware of $LK,"heap_hash",A="FF:::/Kernel/Memory/MAllocFree.CC,heap_hash"$ in $LK,"MemPagTaskAlloc",A="MN:MemPagTaskAlloc"$(). //Be aware of $LK,"heap_hash",A="FF:::/Kernel/Memory/MAllocFree.CC,heap_hash"$ in $LK,"MemPagTaskAlloc",A="MN:MemPagTaskAlloc"$().
PUSH RBP PUSH RBP
MOV RBP,RSP MOV RBP, RSP
PUSH RSI PUSH RSI
PUSH RDI PUSH RDI
TEST U8 [SYS_SEMAS+SEMA_HEAPLOG_ACTIVE*DEFAULT_CACHE_LINE_WIDTH],1 TEST U8 [SYS_SEMAS + SEMA_HEAPLOG_ACTIVE * DEFAULT_CACHE_LINE_WIDTH], 1
JZ @@15 JZ @@15
MOV RBX,U64 SF_ARG1[RBP] MOV RBX, U64 SF_ARG1[RBP]
TEST RBX,RBX TEST RBX, RBX
JZ @@05 JZ @@05
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX] MOV RAX, U64 CMemUsed.size - CMemUsed.start[RBX]
TEST RAX,RAX TEST RAX, RAX
JGE @@05 //Aligned alloced chunks have neg size JGE @@05 //Aligned alloced chunks have neg size
ADD RBX,RAX ADD RBX, RAX
@@05: PUSH RBX @@05: PUSH RBX
MOV RAX,U64 [SYS_EXTERN_TABLE] MOV RAX, U64 [SYS_EXTERN_TABLE]
MOV RAX,U64 EXT_HEAPLOG_FREE*8[RAX] MOV RAX, U64 EXT_HEAPLOG_FREE*8[RAX]
TEST RAX,RAX TEST RAX, RAX
JZ @@10 JZ @@10
CALL RAX CALL RAX
JMP @@15 JMP @@15
@@10: ADD RSP,8 @@10: ADD RSP, 8
@@15: MOV RSI,U64 SF_ARG1[RBP] @@15: MOV RSI, U64 SF_ARG1[RBP]
TEST RSI,RSI TEST RSI, RSI
#if _CONFIG_HEAP_DEBUG #if _CONFIG_HEAP_DEBUG
JZ I32 FREE_DONE JZ I32 FREE_DONE
@ -254,192 +255,202 @@ _FREE::
JZ FREE_DONE JZ FREE_DONE
#endif #endif
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RSI] MOV RAX, U64 CMemUsed.size - CMemUsed.start[RSI]
TEST RAX,RAX TEST RAX, RAX
JGE @@20 //Aligned alloced chunks have neg size. JGE @@20 //Aligned alloced chunks have neg size.
//The neg size is offset to start of $LK,"CMemUsed",A="MN:CMemUsed"$ struct. //The neg size is offset to start of $LK,"CMemUsed",A="MN:CMemUsed"$ struct.
ADD RSI,RAX ADD RSI, RAX
@@20: PUSHFD @@20: PUSHFD
SUB RSI,CMemUsed.start SUB RSI, CMemUsed.start
MOV RDX,U64 CMemUsed.hc[RSI] MOV RDX, U64 CMemUsed.hc[RSI]
CMP U32 CHeapCtrl.hc_signature[RDX],HEAP_CTRL_SIGNATURE_VAL CMP U32 CHeapCtrl.hc_signature[RDX], HEAP_CTRL_SIGNATURE_VAL
JE @@25 JE @@25
ADD RSI,CMemUsed.start ADD RSI, CMemUsed.start
PUSH RSI PUSH RSI
CALL &SysBadFree CALL &SysBadFree
JMP I32 _SYS_HLT JMP I32 _SYS_HLT
@@25: MOV RAX,U64 CMemUsed.size[RSI] @@25: MOV RAX, U64 CMemUsed.size[RSI]
SUB U64 CHeapCtrl.used_u8s[RDX],RAX SUB U64 CHeapCtrl.used_u8s[RDX], RAX
CLI CLI
@@30: LOCK @@30: LOCK
BTS U32 CHeapCtrl.locked_flags[RDX],HClf_LOCKED BTS U32 CHeapCtrl.locked_flags[RDX], HClf_LOCKED
PAUSE PAUSE
JC @@30 JC @@30
#if _CONFIG_HEAP_DEBUG #if _CONFIG_HEAP_DEBUG
//QueueRemove //QueueRemove
MOV RDX,U64 CMemUsed.next[RSI] MOV RDX, U64 CMemUsed.next[RSI]
MOV RDI,U64 CMemUsed.last[RSI] MOV RDI, U64 CMemUsed.last[RSI]
MOV U64 CMemUsed.last[RDX],RDI MOV U64 CMemUsed.last[RDX], RDI
MOV U64 CMemUsed.next[RDI],RDX MOV U64 CMemUsed.next[RDI], RDX
//Caller1/Caller2 //Caller1/Caller2
MOV RDX,U64 [MEM_HEAP_LIMIT] MOV RDX, U64 [MEM_HEAP_LIMIT]
MOV RDI,U64 SF_RIP[RBP] MOV RDI, U64 SF_RIP[RBP]
CMP RDI,RDX CMP RDI, RDX
JB @@35 JB @@35
XOR RDI,RDI XOR RDI, RDI
MOV U64 CMemUnused.caller1[RSI],RDI MOV U64 CMemUnused.caller1[RSI], RDI
JMP @@45 JMP @@45
@@35: MOV U64 CMemUnused.caller1[RSI],RDI @@35: MOV U64 CMemUnused.caller1[RSI], RDI
MOV RDI,U64 SF_RBP[RBP] MOV RDI, U64 SF_RBP[RBP]
CMP RDI,RDX CMP RDI, RDX
JB @@40 JB @@40
XOR RDI,RDI XOR RDI, RDI
JMP @@45 JMP @@45
@@40: MOV RDI,U64 SF_RIP[RDI] @@40: MOV RDI, U64 SF_RIP[RDI]
CMP RDI,RDX CMP RDI, RDX
JB @@45 JB @@45
XOR RDI,RDI XOR RDI, RDI
@@45: MOV U64 CMemUnused.caller2[RSI],RDI @@45: MOV U64 CMemUnused.caller2[RSI], RDI
MOV RDX,U64 CMemUsed.hc[RSI] MOV RDX, U64 CMemUsed.hc[RSI]
#endif #endif
CMP RAX,MEM_HEAP_HASH_SIZE CMP RAX, MEM_HEAP_HASH_SIZE
JAE @@50 JAE @@50
#assert CMemUnused.size==CMemUsed.size #assert CMemUnused.size == CMemUsed.size
// MOV U64 CMemUnused.size[RSI],RAX // MOV U64 CMemUnused.size[RSI], RAX
MOV RBX,U64 CHeapCtrl.heap_hash[RAX+RDX] MOV RBX, U64 CHeapCtrl.heap_hash[RAX + RDX]
MOV U64 CMemUnused.next[RSI],RBX MOV U64 CMemUnused.next[RSI], RBX
MOV U64 CHeapCtrl.heap_hash[RAX+RDX],RSI MOV U64 CHeapCtrl.heap_hash[RAX + RDX], RSI
JMP @@55 JMP @@55
@@50: SUB RSI,sizeof(CMemBlk) @@50: SUB RSI, sizeof(CMemBlk)
PUSH RDX PUSH RDX
PUSH RDX PUSH RDX
PUSH RSI PUSH RSI
CALL &MemPagTaskFree CALL &MemPagTaskFree
POP RDX POP RDX
@@55: LOCK @@55: LOCK
BTR U32 CHeapCtrl.locked_flags[RDX],HClf_LOCKED BTR U32 CHeapCtrl.locked_flags[RDX], HClf_LOCKED
POPFD POPFD
FREE_DONE: FREE_DONE:
POP RDI POP RDI
POP RSI POP RSI
POP RBP POP RBP
RET1 8 RET1 8
//************************************ //************************************
_MSIZE:: _MSIZE::
PUSH RBP PUSH RBP
MOV RBP,RSP MOV RBP, RSP
MOV RBX,U64 SF_ARG1[RBP] MOV RBX, U64 SF_ARG1[RBP]
XOR RAX,RAX XOR RAX, RAX
TEST RBX,RBX TEST RBX, RBX
JZ @@10 JZ @@10
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX] MOV RAX, U64 CMemUsed.size - CMemUsed.start[RBX]
TEST RAX,RAX TEST RAX, RAX
JGE @@05 //Aligned alloced chunks have neg size JGE @@05 //Aligned alloced chunks have neg size
ADD RBX,RAX ADD RBX, RAX
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX] MOV RAX, U64 CMemUsed.size - CMemUsed.start[RBX]
@@05: SUB RAX,CMemUsed.start @@05: SUB RAX, CMemUsed.start
@@10: POP RBP @@10: POP RBP
RET1 8 RET1 8
//************************************ //************************************
_MSIZE2:: _MSIZE2::
PUSH RBP PUSH RBP
MOV RBP,RSP MOV RBP, RSP
MOV RBX,U64 SF_ARG1[RBP] MOV RBX, U64 SF_ARG1[RBP]
XOR RAX,RAX XOR RAX, RAX
TEST RBX,RBX TEST RBX, RBX
JZ @@10 JZ @@10
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX] MOV RAX, U64 CMemUsed.size-CMemUsed.start[RBX]
TEST RAX,RAX TEST RAX, RAX
JGE @@05 //Aligned alloced chunks have neg size JGE @@05 //Aligned alloced chunks have neg size
ADD RBX,RAX ADD RBX, RAX
@@05: MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX] @@05: MOV RAX, U64 CMemUsed.size - CMemUsed.start[RBX]
@@10: POP RBP @@10: POP RBP
RET1 8 RET1 8
//************************************ //************************************
_MHEAP_CTRL:: _MHEAP_CTRL::
PUSH RBP PUSH RBP
MOV RBP,RSP MOV RBP, RSP
MOV RBX,U64 SF_ARG1[RBP] MOV RBX, U64 SF_ARG1[RBP]
XOR RAX,RAX XOR RAX, RAX
TEST RBX,RBX TEST RBX, RBX
JZ @@10 JZ @@10
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX] MOV RAX, U64 CMemUsed.size-CMemUsed.start[RBX]
TEST RAX,RAX TEST RAX, RAX
JGE @@05 //Aligned alloced chunks have neg size JGE @@05 //Aligned alloced chunks have neg size
ADD RBX,RAX ADD RBX, RAX
@@05: MOV RAX,U64 CMemUsed.hc-CMemUsed.start[RBX] @@05: MOV RAX, U64 CMemUsed.hc - CMemUsed.start[RBX]
@@10: POP RBP @@10: POP RBP
RET1 8 RET1 8
} }
_extern _FREE U0 Free(U8 *addr); //Free $LK,"MAlloc",A="MN:MAlloc"$()ed memory chunk. _extern _FREE U0 Free(U8 *addr); //Free $LK,"MAlloc",A="MN:MAlloc"$()ed memory chunk.
_extern _MSIZE I64 MSize(U8 *src); //Size of heap object. _extern _MSIZE I64 MSize( U8 *src); //Size of heap object.
_extern _MSIZE2 I64 MSize2(U8 *src); //Internal size of heap object. _extern _MSIZE2 I64 MSize2( U8 *src); //Internal size of heap object.
_extern _MHEAP_CTRL CHeapCtrl *MHeapCtrl(U8 *src); //$LK,"CHeapCtrl",A="MN:CHeapCtrl"$ of object. _extern _MHEAP_CTRL CHeapCtrl *MHeapCtrl(U8 *src); //$LK,"CHeapCtrl",A="MN:CHeapCtrl"$ of object.
_extern _MALLOC U8 *MAlloc(I64 size,CTask *mem_task=NULL); //Alloc memory chunk. _extern _MALLOC U8 *MAlloc(I64 size, CTask *mem_task=NULL); //Alloc memory chunk.
//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap. //Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap.
U8 *ZMAlloc(I64 size) U8 *ZMAlloc(I64 size)
{//Alloc memory in Zenith's heap. {//Alloc memory in Zenith's heap.
return MAlloc(size,zenith_task); return MAlloc(size, zenith_task);
} }
U8 *CAlloc(I64 size,CTask *mem_task=NULL) U8 *CAlloc(I64 size, CTask *mem_task=NULL)
{//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap. {//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap.
U8 *res=MAlloc(size,mem_task); U8 *res = MAlloc(size, mem_task);
MemSet(res,0,size);
MemSet(res, 0, size);
return res; return res;
} }
U8 *ZCAlloc(I64 size) U8 *ZCAlloc(I64 size)
{//Alloc and set to zero memory in Zenith's heap. {//Alloc and set to zero memory in Zenith's heap.
return CAlloc(size,zenith_task); return CAlloc(size, zenith_task);
} }
U8 *MAllocIdent(U8 *src,CTask *mem_task=NULL) U8 *MAllocIdent(U8 *src, CTask *mem_task=NULL)
{//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap. {//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap.
U8 *res; U8 *res;
I64 size; I64 size;
if (!src) return NULL;
size=MSize(src); if (!src)
res=MAlloc(size,mem_task); return NULL;
MemCopy(res,src,size); size = MSize(src);
res = MAlloc(size, mem_task);
MemCopy(res, src, size);
return res; return res;
} }
U8 *ZMAllocIdent(U8 *src) U8 *ZMAllocIdent(U8 *src)
{//Alloc in Zenith's heap, ident copy of heap node. {//Alloc in Zenith's heap, ident copy of heap node.
return MAllocIdent(src,zenith_task); return MAllocIdent(src, zenith_task);
} }
U8 *MAllocAligned(I64 size,I64 alignment, U8 *MAllocAligned(I64 size, I64 alignment, CTask *mem_task=NULL, I64 misalignment=0)
CTask *mem_task=NULL,I64 misalignment=0)
{//Only powers of two alignment. This is awful. {//Only powers of two alignment. This is awful.
I64 mask=alignment-1; I64 mask = alignment - 1;
U8 *ptr=MAlloc(size+mask+sizeof(I64)+misalignment,mem_task), U8 *ptr = MAlloc(size + mask + sizeof(I64) + misalignment, mem_task),
*res=(ptr+sizeof(I64)+mask)&~mask+misalignment; *res = (ptr + sizeof(I64) + mask) & ~mask + misalignment;
res(I64 *)[-1]=ptr-res;
#assert offset(CMemUsed.size)==offset(CMemUsed.start)-sizeof(I64) res(I64 *)[-1] = ptr - res;
#assert offset(CMemUsed.size) == offset(CMemUsed.start) - sizeof(I64)
return res; return res;
} }
U8 *CAllocAligned(I64 size,I64 alignment, U8 *CAllocAligned(I64 size, I64 alignment, CTask *mem_task=NULL, I64 misalignment=0)
CTask *mem_task=NULL,I64 misalignment=0)
{//Only powers of two alignment. This is awful. {//Only powers of two alignment. This is awful.
I64 mask=alignment-1; I64 mask = alignment-1;
U8 *ptr=MAlloc(size+mask+sizeof(I64)+misalignment,mem_task), U8 *ptr = MAlloc(size + mask + sizeof(I64) + misalignment, mem_task),
*res=(ptr+sizeof(I64)+mask)&~mask+misalignment; *res = (ptr + sizeof(I64) + mask) & ~mask + misalignment;
res(I64 *)[-1]=ptr-res;
#assert offset(CMemUsed.size)==offset(CMemUsed.start)-sizeof(I64) res(I64 *)[-1] = ptr - res;
MemSet(res,0,size); #assert offset(CMemUsed.size) == offset(CMemUsed.start) - sizeof(I64)
MemSet(res, 0, size);
return res; return res;
} }
@ -449,18 +460,19 @@ U8 *ReAlloc(U8 *ptr, U64 new_size, CTask *mem_task=NULL)
//Useless for changing chunk sizes smaller than 8 bytes because MAlloc allocs 8 bytes at a time. //Useless for changing chunk sizes smaller than 8 bytes because MAlloc allocs 8 bytes at a time.
U8 *res; U8 *res;
if(!new_size) if (!new_size)
{ {
Free(ptr); //we can free NULL Free(ptr); //we can free NULL
return NULL; return NULL;
} }
res = MAlloc(new_size, mem_task); res = MAlloc(new_size, mem_task);
if(!ptr) if (!ptr)
return res; return res;
MemCopy(res, ptr, MinI64(MSize(ptr), new_size)); MemCopy(res, ptr, MinI64(MSize(ptr), new_size));
Free(ptr); Free(ptr);
return res; return res;
} }
@ -469,22 +481,26 @@ U8 *ZReAlloc(U8 *ptr, I64 new_size)
return ReAlloc(ptr, new_size, zenith_task); return ReAlloc(ptr, new_size, zenith_task);
} }
U8 *StrNew(U8 *buf,CTask *mem_task=NULL) U8 *StrNew(U8 *buf, CTask *mem_task=NULL)
{//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap. {//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap.
U8 *res; U8 *res;
I64 size; I64 size;
if (buf) {
size=StrLen(buf)+1; if (buf)
res=MAlloc(size,mem_task); {
MemCopy(res,buf,size); size = StrLen(buf) + 1;
} else { res = MAlloc(size, mem_task);
res=MAlloc(1,mem_task); MemCopy(res, buf, size);
*res=0; }
else
{
res = MAlloc(1, mem_task);
*res = 0;
} }
return res; return res;
} }
U8 *ZStrNew(U8 *buf) U8 *ZStrNew(U8 *buf)
{//Alloc copy of string in Zenith's heap. {//Alloc copy of string in Zenith's heap.
return StrNew(buf,zenith_task); return StrNew(buf, zenith_task);
} }

View File

@ -1,166 +1,200 @@
U0 SysBadFree(I64 *ptr) U0 SysBadFree(I64 *ptr)
{ {
Panic("Bad Free:",ptr); Panic("Bad Free:", ptr);
} }
U0 SysBadMAlloc(I64 *ptr) U0 SysBadMAlloc(I64 *ptr)
{ {
Panic("Bad MAlloc:",ptr); Panic("Bad MAlloc:", ptr);
} }
U8 *MemPagAlloc(I64 pags,CBlkPool *bp=NULL) U8 *MemPagAlloc(I64 pags, CBlkPool *bp=NULL)
{/*Alloc pags from BlkPool. Don't link to task. {/*Alloc pags from BlkPool. Don't link to task.
(Linking to a task means they will be freed when the task dies.) (Linking to a task means they will be freed when the task dies.)
It might give you more than you asked for. It might give you more than you asked for.
Return: NULL if out of memory. Return: NULL if out of memory.
*/ */
CMemBlk *res=NULL,*m; CMemBlk *res = NULL, *m;
I64 i; I64 i;
if (!bp) bp=sys_code_bp;
if (!bp)
bp = sys_code_bp;
PUSHFD PUSHFD
CLI CLI
while (LBts(&bp->locked_flags,BPlf_LOCKED)) while (LBts(&bp->locked_flags, BPlf_LOCKED))
PAUSE PAUSE
if (pags<MEM_FREE_PAG_HASH_SIZE) { if (pags < MEM_FREE_PAG_HASH_SIZE)
if (res=bp->free_pag_hash[pags]) { {
bp->free_pag_hash[pags]=res->next; if (res = bp->free_pag_hash[pags])
{
bp->free_pag_hash[pags] = res->next;
goto at_done; goto at_done;
} }
i=Bsr(MEM_FREE_PAG_HASH_SIZE)+1; i = Bsr(MEM_FREE_PAG_HASH_SIZE) + 1;
} else { } else {
//We'll now round-up to a power of two. //We'll now round-up to a power of two.
//There is some overhead on allocations and //There is some overhead on allocations and
//we wouldn't want to round to the next //we wouldn't want to round to the next
//power of two if a power of two was requested. //power of two if a power of two was requested.
//So we use a little more than a power of two. //So we use a little more than a power of two.
pags-=MEM_EXTRA_HASH2_PAGS; pags -= MEM_EXTRA_HASH2_PAGS;
i=Bsr(pags)+1; i = Bsr(pags) + 1;
pags=1<<i+MEM_EXTRA_HASH2_PAGS; pags = 1 << i + MEM_EXTRA_HASH2_PAGS;
if (res=bp->free_pag_hash2[i]) { if (res = bp->free_pag_hash2[i])
bp->free_pag_hash2[i]=res->next; {
bp->free_pag_hash2[i] = res->next;
goto at_done; goto at_done;
} }
} }
m=&bp->mem_free_list; m = &bp->mem_free_list;
while (TRUE) { while (TRUE)
if (!(res=m->next)) { {
if (!(res = m->next))
{
//We're probably out of luck, but lets search for a //We're probably out of luck, but lets search for a
//freed larger size block... and, screw-it, return the whole thing. //freed larger size block... and, screw-it, return the whole thing.
do { do
if (res=bp->free_pag_hash2[++i]) { {
pags=1<<i+MEM_EXTRA_HASH2_PAGS; if (res = bp->free_pag_hash2[++i])
bp->free_pag_hash2[i]=res->next; {
pags = 1 << i + MEM_EXTRA_HASH2_PAGS;
bp->free_pag_hash2[i] = res->next;
goto at_done; goto at_done;
} }
} while (i<64-MEM_PAG_BITS-1); }
pags=0; while (i < 64 - MEM_PAG_BITS - 1);
res=NULL; //Out of memory
pags = 0;
res = NULL; //Out of memory
goto at_done2; goto at_done2;
} }
if (res->pags<pags) if (res->pags < pags)
m=res; m = res;
else { else
if (res->pags==pags) { {
m->next=res->next; if (res->pags == pags)
{
m->next = res->next;
goto at_done; goto at_done;
} else { }
res->pags-=pags; else
res(U8 *)+=res->pags<<MEM_PAG_BITS; {
res->pags=pags; res->pags -= pags;
res(U8 *) += res->pags << MEM_PAG_BITS;
res->pags = pags;
goto at_done; goto at_done;
} }
} }
} }
at_done: at_done:
bp->used_u8s+=res->pags<<MEM_PAG_BITS; bp->used_u8s += res->pags << MEM_PAG_BITS;
at_done2: at_done2:
LBtr(&bp->locked_flags,BPlf_LOCKED); LBtr(&bp->locked_flags, BPlf_LOCKED);
POPFD POPFD
return res; return res;
} }
U0 MemPagFree(CMemBlk *m, CBlkPool *bp=NULL)
{//Return non-task pags to BlkPool. NULL m is a no-op; bp defaults to sys_code_bp.
	I64 i, pags;

	if (m)
	{
		if (!bp)
			bp = sys_code_bp;
		PUSHFD
		CLI	//Spin on pool lock with interrupts off; PUSHFD/POPFD restores IF.
		while (LBts(&bp->locked_flags, BPlf_LOCKED))
			PAUSE
		pags = m->pags;
		m->mb_signature = MBS_UNUSED_SIGNATURE_VAL;
		bp->used_u8s -= pags << MEM_PAG_BITS;
		if (pags < MEM_FREE_PAG_HASH_SIZE)
		{//Small blocks: push onto exact-size free list.
			m->next = bp->free_pag_hash[pags];
			bp->free_pag_hash[pags] = m;
		}
		else
		{
			//We'll now round-up to a power of two.
			//There is some overhead on allocations and
			//we wouldn't want to round to the next
			//power of two if a power of two was requested.
			//So we use a little more than a power of two.
			pags -= MEM_EXTRA_HASH2_PAGS;
			i = Bsr(pags);
			m->next = bp->free_pag_hash2[i];
			bp->free_pag_hash2[i] = m;
		}
		LBtr(&bp->locked_flags, BPlf_LOCKED);
		POPFD
	}
}
CMemBlk *MemPagTaskAlloc(I64 pags, CHeapCtrl *hc)
{/*hc must be locked. Don't preempt this routine.
Currently, this is only called from $LK,"MAlloc",A="MN:MAlloc"$().
Return: NULL if out of memory.
*/
	CMemBlk *res;
	I64 threshold, count, size;
	CMemUnused *uum, **_uum, **_ptr;

	if (res = MemPagAlloc(pags, hc->bp))
	{
		QueueInsert(res, hc->last_mem_blk);
		res->mb_signature = MBS_USED_SIGNATURE_VAL;
		hc->alloced_u8s += res->pags << MEM_PAG_BITS;

		//Tidy-up free list (Move into heap hash)
		//because if free list gets long, delay causes crash.
		threshold = MEM_HEAP_HASH_SIZE >> 4;
#assert MEM_HEAP_HASH_SIZE >> 4 >= sizeof(U8 *)
		do
		{
			count = 0;
			_uum = &hc->malloc_free_list;
			while (uum = *_uum)
			{
#assert !offset(CMemUnused.next)
				size = uum->size;
				if (size < threshold)
				{//Unlink and move into the heap hash bucket for this size.
					*_uum = uum->next;
					_ptr = (&hc->heap_hash)(U8 *) + size;
					uum->next = *_ptr;
					*_ptr = uum;
				}
				else
				{//Leave on free list; uum doubles as &uum->next (next at offset 0).
					count++;
					_uum = uum;
				}
			}
			threshold <<= 1;	//Widen the net until the list is short enough.
		}
		while (count > 8 && threshold <= MEM_HEAP_HASH_SIZE);
	}
	return res;
}
U0 MemPagTaskFree(CMemBlk *m,CHeapCtrl *hc) U0 MemPagTaskFree(CMemBlk *m, CHeapCtrl *hc)
{//hc must be locked {//hc must be locked
if (m) { if (m)
{
PUSHFD PUSHFD
CLI CLI
if (m->mb_signature!=MBS_USED_SIGNATURE_VAL) if (m->mb_signature != MBS_USED_SIGNATURE_VAL)
SysBadFree(m); SysBadFree(m);
else { else
{
QueueRemove(m); QueueRemove(m);
hc->alloced_u8s-=m->pags<<MEM_PAG_BITS; hc->alloced_u8s -= m->pags << MEM_PAG_BITS;
MemPagFree(m,hc->bp); MemPagFree(m, hc->bp);
} }
POPFD POPFD
} }

View File

@ -1,53 +1,59 @@
Bool Mem32DevIns(CMemRange *tmpmr)
{//Insert range into dev.mem32_head list, carving it out of an unused range.
//Return: TRUE if an enclosing unused range was found and split, else FALSE.
	CMemRange *tmpmr1 = dev.mem32_head.next, *tmpmr2;

	while (tmpmr1 != &dev.mem32_head)
	{
		if (!tmpmr1->type && tmpmr->base >= tmpmr1->base && tmpmr->base + tmpmr->size <= tmpmr1->base + tmpmr1->size)
		{
			if (tmpmr->base > tmpmr1->base)
			{//Leading slack: make a new unused range before the inserted one.
				tmpmr2 = ZMAlloc(sizeof(CMemRange));
				tmpmr2->type = MRT_UNUSED;
				tmpmr2->flags = 0;
				tmpmr2->base = tmpmr1->base;
				tmpmr2->size = tmpmr->base - tmpmr1->base;
				QueueInsertRev(tmpmr2, tmpmr1);
			}
			QueueInsertRev(tmpmr, tmpmr1);
			//Shrink the enclosing range to whatever follows the insertion.
			tmpmr1->size = tmpmr1->base + tmpmr1->size - (tmpmr->base + tmpmr->size);
			tmpmr1->base = tmpmr->base + tmpmr->size;
			if (!tmpmr1->size)
			{//Fully consumed.
				QueueRemove(tmpmr1);
				Free(tmpmr1);
			}
			return TRUE;
		}
		tmpmr1 = tmpmr1->next;
	}
	return FALSE;
}
U0 Mem32DevInit() U0 Mem32DevInit()
{ {
CMemRange *tmpmr; CMemRange *tmpmr;
CMemE820 *m20=MEM_E820; CMemE820 *m20 = MEM_E820;
QueueInit(&dev.mem32_head); QueueInit(&dev.mem32_head);
tmpmr=ZMAlloc(sizeof(CMemRange)); tmpmr = ZMAlloc(sizeof(CMemRange));
tmpmr->type=MRT_UNUSED; tmpmr->type = MRT_UNUSED;
tmpmr->flags=0; tmpmr->flags = 0;
//Maybe !!! Change this to 0xF0000000 !!! //Maybe !!! Change this to 0xF0000000 !!!
tmpmr->base=0xE0000000; tmpmr->base = 0xE0000000;
tmpmr->size=0x10000000; tmpmr->size = 0x10000000;
QueueInsert(tmpmr,dev.mem32_head.last); QueueInsert(tmpmr, dev.mem32_head.last);
if (m20->type) { if (m20->type)
while (m20->type) { {
tmpmr=ZMAlloc(sizeof(CMemRange)); while (m20->type)
tmpmr->type=m20->type; {
tmpmr->flags=0; tmpmr = ZMAlloc(sizeof(CMemRange));
tmpmr->base=m20->base; tmpmr->type = m20->type;
tmpmr->size=m20->len; tmpmr->flags = 0;
tmpmr->base = m20->base;
tmpmr->size = m20->len;
if (!Mem32DevIns(tmpmr)) if (!Mem32DevIns(tmpmr))
Free(tmpmr); Free(tmpmr);
m20++; m20++;
@ -55,102 +61,118 @@ U0 Mem32DevInit()
} }
} }
U8 *Mem32DevAlloc(I64 size, I64 alignment)
{//Alloc 32-bit addr space for device. (Doesn't work.) Not used.
	//For this to work the BIOS E820 map must be searched for gaps in
	//the 32-bit range and the pool initialized to the gaps.
	U8 *base, *limit;
	CMemRange *tmpmr, *tmpmr1;

	while (LBts(&sys_semas[SEMA_DEV_MEM], 0))
		Yield;
	tmpmr1 = dev.mem32_head.next;
	while (tmpmr1 != &dev.mem32_head)
	{
		base = (tmpmr1->base + alignment - 1) & ~(alignment - 1);	//Round up to alignment.
		limit = base + size - 1;
		if (!tmpmr1->type && limit < tmpmr1->base + tmpmr1->size)
		{
			tmpmr = ZMAlloc(sizeof(CMemRange));
			tmpmr->type = MRT_DEV;
			tmpmr->flags = 0;
			tmpmr->base = base;
			tmpmr->size = size;
			if (!Mem32DevIns(tmpmr))
			{
				Free(tmpmr);
				LBtr(&sys_semas[SEMA_DEV_MEM], 0);
				return NULL;
			}
			LBtr(&sys_semas[SEMA_DEV_MEM], 0);
			return tmpmr->base;
		}
		tmpmr1 = tmpmr1->next;
	}
	LBtr(&sys_semas[SEMA_DEV_MEM], 0);
	return NULL;
}
U0 Mem32DevFree(U8 *base)
{//Free 32-bit device address space. NULL base is a no-op.
	CMemRange *tmpmr;

	if (!base)
		return;
	while (LBts(&sys_semas[SEMA_DEV_MEM], 0))
		Yield;
	tmpmr = dev.mem32_head.next;
	while (tmpmr != &dev.mem32_head)
	{
		if (tmpmr->base == base)
		{//Mark range unused; adjacent unused ranges are not coalesced here.
			tmpmr->type = MRT_UNUSED;
			break;
		}
		tmpmr = tmpmr->next;
	}
	LBtr(&sys_semas[SEMA_DEV_MEM], 0);
}
U8 *Mem64DevAlloc(I64 *_pages1Gig)
{//Alloc 64-bit addr space for device, *_pages1Gig 1-Gig pages, growing down.
	U8 *a;
	I64 i = *_pages1Gig, *pte;

	while (LBts(&sys_semas[SEMA_DEV_MEM], 0))
		Yield;
	while (i--)
	{
		a = dev.mem64_ptr -= 1 << 30;	//Claim next 1Gig below current ptr.
		do
		{
			pte = MemPageTable(a);
			*pte = *pte & ~0x18 | 0x11; //Uncached and present
			InvalidatePage(dev.mem64_ptr);
			a += mem_page_size;
		}
		while (a - dev.mem64_ptr < 1 << 30);
	}
	LBtr(&sys_semas[SEMA_DEV_MEM], 0);
	return dev.mem64_ptr;
}
U0 Mem64DevFree(U8 *base, I64 pages1Gig)
{//Free 64-bit device address space. Only the most recent alloc can be freed.
	if (!base)
		return;
	while (LBts(&sys_semas[SEMA_DEV_MEM], 0))
		Yield;
	if (base == dev.mem64_ptr)
		dev.mem64_ptr += pages1Gig * 1 << 30;
	//else not freed
	LBtr(&sys_semas[SEMA_DEV_MEM], 0);
}
U0 UncachedAliasAlloc() //Make uncached alias for 4 lowest Gig.
{
	I64 i = 4, *pte;
	U8 *a;

	a = dev.uncached_alias = Mem64DevAlloc(&i);
	do
	{//Point each alias page at low physical mem: 0x197 = flags, +offset = phys addr.
		pte = MemPageTable(a);
		*pte = 0x197 + a - dev.uncached_alias;
		InvalidatePage(a);
		a += mem_page_size;
	}
	while (a - dev.uncached_alias < 1 << 32);
}
I64 MemBIOSTotal() I64 MemBIOSTotal()
{//Returns max of either E801 or E820 mem map. {//Returns max of either E801 or E820 mem map.
I64 total01 = 0x100000, total20 = 0; I64 total01 = 0x100000, total20 = 0;
U16 *mem01 = MEM_E801; U16 *mem01 = MEM_E801;
CMemE820 *mem20 = MEM_E820; CMemE820 *mem20 = MEM_E820;
@ -159,28 +181,29 @@ I64 MemBIOSTotal()
if (mem20->type) if (mem20->type)
{ {
while(mem20->type) while (mem20->type)
{ {
if(mem20->type == MEM_E820t_USABLE) if(mem20->type == MEM_E820t_USABLE)
total20 += mem20->len; total20 += mem20->len;
mem20++; mem20++;
} }
} }
return MaxI64(total01, total20); return MaxI64(total01, total20);
} }
I64 Scale2Mem(I64 min, I64 max, I64 limit=2*1024*1024*1024)
{//Helps pick DiskCache and RAMDisk sizes.
	//Can be used in $LK,"BootHDIns",A="MN:BootHDIns"$() config scripts.
	//Interpolates linearly from min to max as pool size approaches limit.
	I64 i;

	if (sys_data_bp)
		i = sys_data_bp->alloced_u8s;
	else
		i = sys_code_bp->alloced_u8s;
	if (i >= limit)
		return max;
	else
		return min + (max - min) * i / limit;
}
I64 Seg2Linear(U32 *ptr) I64 Seg2Linear(U32 *ptr)

View File

@ -4,202 +4,202 @@ asm {
USE32 USE32
SYS_INIT_PAGE_TABLES:: SYS_INIT_PAGE_TABLES::
//Check 1Gig page capability and set page size. //Check 1Gig page capability and set page size.
MOV EAX,0x80000001 MOV EAX, 0x80000001
CPUID CPUID
MOV EAX,1<<21 MOV EAX, 1 << 21
BT EDX,26 BT EDX, 26
JNC @@05 JNC @@05
MOV EAX,1<<30 MOV EAX, 1 << 30
@@05: MOV U32 [MEM_PAGE_SIZE],EAX @@05: MOV U32 [MEM_PAGE_SIZE], EAX
//Set mapped space limit //Set mapped space limit
MOV EAX,[MEM_PHYSICAL_SPACE] MOV EAX, [MEM_PHYSICAL_SPACE]
MOV EDX,[MEM_PHYSICAL_SPACE+4] MOV EDX, [MEM_PHYSICAL_SPACE + 4]
BT U32 [MEM_PAGE_SIZE],30 //Round-up to 1Gig boundary? BT U32 [MEM_PAGE_SIZE], 30 //Round-up to 1Gig boundary?
JNC @@10 JNC @@10
ADD EAX,0x3FFFFFFF ADD EAX, 0x3FFFFFFF
ADC EDX,0 ADC EDX, 0
AND EAX,~0x3FFFFFFF AND EAX, ~0x3FFFFFFF
@@10: INC EDX //Need 4Gig extra for uncached alias up at top of space. @@10: INC EDX //Need 4Gig extra for uncached alias up at top of space.
MOV [MEM_MAPPED_SPACE],EAX MOV [MEM_MAPPED_SPACE], EAX
MOV [MEM_MAPPED_SPACE+4],EDX MOV [MEM_MAPPED_SPACE + 4], EDX
//How many 2Meg pages? //How many 2Meg pages?
MOV CL,21 MOV CL, 21
ADD EAX,0x1FFFFF ADD EAX, 0x1FFFFF
ADC EDX,0 ADC EDX, 0
SHRD EAX,EDX SHRD EAX, EDX
SHR EDX,CL SHR EDX, CL
MOV [MEM_2MEG_NUM],EAX MOV [MEM_2MEG_NUM], EAX
MOV [MEM_2MEG_NUM+4],EDX MOV [MEM_2MEG_NUM + 4], EDX
//How many 1Gig pages? //How many 1Gig pages?
MOV CL,9 MOV CL, 9
ADD EAX,0x1FF ADD EAX, 0x1FF
ADC EDX,0 ADC EDX, 0
SHRD EAX,EDX SHRD EAX, EDX
SHR EDX,CL SHR EDX, CL
MOV [MEM_1GIG_NUM],EAX MOV [MEM_1GIG_NUM], EAX
MOV [MEM_1GIG_NUM+4],EDX MOV [MEM_1GIG_NUM + 4], EDX
//How many 512Gig pages? //How many 512Gig pages?
MOV CL,9 MOV CL, 9
ADD EAX,0x1FF ADD EAX, 0x1FF
ADC EDX,0 ADC EDX, 0
SHRD EAX,EDX SHRD EAX, EDX
SHR EDX,CL SHR EDX, CL
MOV [MEM_512GIG_NUM],EAX MOV [MEM_512GIG_NUM], EAX
MOV [MEM_512GIG_NUM+4],EDX MOV [MEM_512GIG_NUM + 4], EDX
//Set $LK,"CSysFixedArea",A="MN:CSysFixedArea"$ to zero //Set $LK,"CSysFixedArea",A="MN:CSysFixedArea"$ to zero
MOV EDI,SYS_FIXED_AREA MOV EDI, SYS_FIXED_AREA
XOR EAX,EAX XOR EAX, EAX
MOV ECX,sizeof(CSysFixedArea)/4 MOV ECX, sizeof(CSysFixedArea) / 4
REP_STOSD REP_STOSD
MOV U32 [MEM_PML2],EDI MOV U32 [MEM_PML2], EDI
//Check for 1Gig page capability. //Check for 1Gig page capability.
BT U32 [MEM_PAGE_SIZE],30 BT U32 [MEM_PAGE_SIZE], 30
JC @@15 JC @@15
//Find PML2 Size //Find PML2 Size
MOV EAX,U32 [MEM_2MEG_NUM] MOV EAX, U32 [MEM_2MEG_NUM]
ADD EAX,0x1FF ADD EAX, 0x1FF
AND EAX,~0x1FF AND EAX, ~0x1FF
SHL EAX,3 SHL EAX, 3
ADD EDI,EAX ADD EDI, EAX
//Find PML3 Size //Find PML3 Size
@@15: MOV U32 [MEM_PML3],EDI @@15: MOV U32 [MEM_PML3], EDI
MOV EAX,U32 [MEM_1GIG_NUM] MOV EAX, U32 [MEM_1GIG_NUM]
ADD EAX,0x1FF ADD EAX, 0x1FF
AND EAX,~0x1FF AND EAX, ~0x1FF
SHL EAX,3 SHL EAX, 3
ADD EDI,EAX ADD EDI, EAX
//Find PML4 Size //Find PML4 Size
MOV U32 [MEM_PML4],EDI MOV U32 [MEM_PML4], EDI
MOV EAX,U32 [MEM_512GIG_NUM] MOV EAX, U32 [MEM_512GIG_NUM]
ADD EAX,0x1FF ADD EAX, 0x1FF
AND EAX,~0x1FF AND EAX, ~0x1FF
SHL EAX,3 SHL EAX, 3
ADD EAX,EDI ADD EAX, EDI
MOV U32 [MEM_HEAP_BASE],EAX MOV U32 [MEM_HEAP_BASE], EAX
//Set page tables to zero //Set page tables to zero
MOV EDI,U32 [MEM_PML2] MOV EDI, U32 [MEM_PML2]
SUB EAX,EDI SUB EAX, EDI
MOV ECX,EAX MOV ECX, EAX
SHR ECX,2 SHR ECX, 2
XOR EAX,EAX XOR EAX, EAX
REP_STOSD REP_STOSD
//Check for 1Gig page capability. //Check for 1Gig page capability.
BT U32 [MEM_PAGE_SIZE],30 BT U32 [MEM_PAGE_SIZE], 30
JC @@30 JC @@30
//PML2: Use 2Meg Pages //PML2: Use 2Meg Pages
MOV EAX,0x87 //bit 7 is page size (2Meg) MOV EAX, 0x87 //bit 7 is page size (2Meg)
XOR EDX,EDX XOR EDX, EDX
MOV EDI,[MEM_PML2] MOV EDI, [MEM_PML2]
MOV ECX,[MEM_2MEG_NUM] MOV ECX, [MEM_2MEG_NUM]
@@20: MOV U32 [EDI],EAX @@20: MOV U32 [EDI], EAX
ADD EDI,4 ADD EDI, 4
MOV U32 [EDI],EDX MOV U32 [EDI], EDX
ADD EDI,4 ADD EDI, 4
ADD EAX,0x200000 ADD EAX, 0x200000
ADC EDX,0 ADC EDX, 0
LOOP @@20 LOOP @@20
//PML3: Use 2Meg Pages //PML3: Use 2Meg Pages
MOV EAX,[MEM_PML2] MOV EAX, [MEM_PML2]
OR EAX,7 OR EAX, 7
XOR EDX,EDX XOR EDX, EDX
MOV EDI,[MEM_PML3] MOV EDI, [MEM_PML3]
MOV ECX,[MEM_1GIG_NUM] MOV ECX, [MEM_1GIG_NUM]
@@25: MOV U32 [EDI],EAX @@25: MOV U32 [EDI], EAX
ADD EDI,4 ADD EDI, 4
MOV U32 [EDI],EDX MOV U32 [EDI], EDX
ADD EDI,4 ADD EDI, 4
ADD EAX,0x1000 ADD EAX, 0x1000
ADC EDX,0 ADC EDX, 0
LOOP @@25 LOOP @@25
JMP @@40 JMP @@40
//PML3: Use 1Gig Pages //PML3: Use 1Gig Pages
@@30: MOV EAX,0x87 //bit 7 is page size (1Gig) @@30: MOV EAX, 0x87 //bit 7 is page size (1Gig)
XOR EDX,EDX XOR EDX, EDX
MOV EDI,[MEM_PML3] MOV EDI, [MEM_PML3]
MOV ECX,[MEM_1GIG_NUM] MOV ECX, [MEM_1GIG_NUM]
@@35: MOV U32 [EDI],EAX @@35: MOV U32 [EDI], EAX
ADD EDI,4 ADD EDI, 4
MOV U32 [EDI],EDX MOV U32 [EDI], EDX
ADD EDI,4 ADD EDI, 4
ADD EAX,0x40000000 ADD EAX, 0x40000000
ADC EDX,0 ADC EDX, 0
LOOP @@35 LOOP @@35
//PML4 //PML4
@@40: MOV EAX,[MEM_PML3] @@40: MOV EAX, [MEM_PML3]
OR EAX,7 OR EAX, 7
XOR EDX,EDX XOR EDX, EDX
MOV EDI,[MEM_PML4] MOV EDI, [MEM_PML4]
MOV ECX,[MEM_512GIG_NUM] MOV ECX, [MEM_512GIG_NUM]
@@45: MOV U32 [EDI],EAX @@45: MOV U32 [EDI], EAX
ADD EDI,4 ADD EDI, 4
MOV U32 [EDI],EDX MOV U32 [EDI], EDX
ADD EDI,4 ADD EDI, 4
ADD EAX,0x1000 ADD EAX, 0x1000
ADC EDX,0 ADC EDX, 0
LOOP @@45 LOOP @@45
RET RET
SYS_INIT_16MEG_SYS_CODE_BP::
// Init sys_code_bp to BIOS E801 lowest 16Meg val.
// $LK,"BlkPoolsInit",A="MN:BlkPoolsInit"$() adds the rest.
	MOV U32 [SYS_CODE_BP], SYS_FIXED_AREA + CSysFixedArea.sys_code_bp
	MOV U32 [SYS_CODE_BP + 4], 0
	MOV U32 [SYS_DATA_BP], 0
	MOV U32 [SYS_DATA_BP + 4], 0
	XOR EAX, EAX
	MOV AX, U16 [MEM_E801] //1 Kb blks between 1M and 16M
	SHL EAX, 10
	ADD EAX, 0x100000
	MOV EDI, U32 [MEM_HEAP_BASE]
	SUB EAX, EDI
//EDI=BASE EAX=SIZE
	TEST U8 [SYS_MEM_INIT_FLAG], 1
	JZ @@05
	PUSH EAX	//Fill heap with init val, preserving base/size regs.
	PUSH EDI
	MOV ECX, EAX
	MOV AL, U8 [SYS_MEM_INIT_VAL]
	REP_STOSB
	POP EDI
	POP EAX
@@05:	SHR EAX, MEM_PAG_BITS	//Size in pags.
	MOV ESI, SYS_FIXED_AREA + CSysFixedArea.sys_code_bp
	MOV EBX, U32 CBlkPool.mem_free_list[ESI]
	MOV U32 CMemBlk.next[EDI], EBX
	MOV U32 CMemBlk.next + 4[EDI], 0
	MOV U32 CBlkPool.mem_free_list[ESI], EDI
	MOV U32 CBlkPool.mem_free_list + 4[ESI], 0
	MOV U32 CMemBlk.mb_signature[EDI], MBS_UNUSED_SIGNATURE_VAL
	MOV U32 CMemBlk.pags[EDI], EAX
	SHL EAX, MEM_PAG_BITS
	ADD U32 CBlkPool.alloced_u8s[ESI], EAX
	BTS U32 [SYS_RUN_LEVEL], RLf_16MEG_SYS_CODE_BP
	RET
} }
I64 *MemPageTable(U8 *a)
{//Point to page table entry for addr.
	//HolyC precedence: `a >> 30 * 8` is `(a>>30)*8` (shift binds tighter than `*`),
	//i.e. table base + page index * 8 bytes per entry.
	if (Bt(&mem_page_size, 30))
		return *MEM_PML3(U64 *) + a >> 30 * 8;
	else
		return *MEM_PML2(U64 *) + a >> 21 * 8;
}