?? NEWTITLE := 'MMM$MTR_USER_REQUEST_PROCESSOR' ??
MODULE mmm$mtr_user_request_processor {MMMMUR} ;
?? RIGHT := 110 ??
{
{  PURPOSE:
{     This module contains request processors that deal with interfacing
{     job mode mem mgr requests to mem mgr in mtr mode, locking and unlocking
{     pages, locking and unlocking segments, and setting segment lengths.


?? NEWTITLE := 'Global Declarations Referenced By This Module', EJECT ??
?? PUSH (LISTEXT := ON) ??
*copyc mtc$job_fixed_segment
*copyc osc$table_lock_activity
*copyc syc$monitor_request_codes
*copyc osd$virtual_address
*copyc dfe$error_condition_codes
*copyc mme$condition_codes
*copyc dmt$disk_file_descriptor
*copyc gft$file_desc_entry_p
*copyc gft$locked_file_desc_entry_p
*copyc mmt$active_segment_table
*copyc mmt$io_identifier
*copyc mmt$lus_declarations
*copyc mmt$page_frame_index
*copyc mmt$page_frame_queue_id
*copyc mmt$page_selection_criteria
*copyc mmt$rb_change_segment_table
*copyc mmt$rb_fetch_offset_mod_pages
*copyc mmt$rb_fetch_pva_unwritten_pgs
*copyc mmt$rb_free_flush
*copyc mmt$rb_lock_unlock_pages
*copyc mmt$rb_lock_unlock_segment
*copyc mmt$rb_memory_manager_io
*copyc mmt$rb_set_get_segment_length
*copyc mmt$rb_wait_io_completion
*copyc mmt$segment_access_rights
*copyc mmt$segment_descriptor_table
*copyc mmt$segment_descriptor_table_ex
*copyc ost$cpu_state_table
*copyc ost$execution_control_block
*copyc ost$hardware_subranges
*copyc ost$heap
*copyc ost$page_table
*copyc sft$file_space_limit_kind
*copyc syt$monitor_request_code
?? POP ??
*copyc dfp$fetch_page_status
*copyc dmp$fetch_page_status
*copyc dmp$get_disk_file_descriptor_p
*copyc dmp$get_fau_entry
*copyc gfp$mtr_convert_job_mode_fde_p
*copyc gfp$mtr_get_fde_p
*copyc gfp$mtr_get_locked_fde_p
*copyc gfp$mtr_get_sfid_from_fde_p
*copyc gfp$mtr_unlock_fde_p
*copyc jmp$unlock_ajl
*copyc mmp$asid
*copyc mmp$aste_pointer
*copyc mmp$convert_pva
*copyc mmp$delete_pt_entry
*copyc mmp$fetch_pfti_array_size
*copyc mmp$find_next_pfti
*copyc mmp$get_max_sdt_sdtx_pointer
*copyc mmp$get_verify_asti_in_fde
*copyc mmp$initialize_find_next_pfti
*copyc mmp$mm_free_pages
*copyc mmp$mm_write_modified_pages
*copyc mmp$process_wmp_status
*copyc mmp$purge_all_cache_map
*copyc mmp$relink_page_frame
*copyc mmp$remove_pages_working_set
*copyc mmp$verify_pva
*copyc mmp$xtask_pva_to_sva
*copyc mtp$error_stop
*copyc mtp$set_status_abnormal
*copyc tmp$clear_lock
*copyc tmp$dequeue_task
*copyc tmp$get_taskid_from_task_queue
*copyc tmp$get_xcb_p
*copyc tmp$mtr_begin_lock_activity
*copyc tmp$mtr_end_lock_activity
*copyc tmp$queue_task
*copyc tmp$set_lock
*copyc dmv$number_unavailable_volumes
*copyc mmv$ast_p
*copyc mmv$gpql
*copyc mmv$multiple_caches
*copyc mmv$multiple_page_maps
*copyc mmv$pft_p
*copyc mmv$pfti_array_p
*copyc mmv$pt_p
*copyc osv$page_size
*copyc tmv$ptl_lock
*copyc tmv$ptl_p
*copyc i#move
?? OLDTITLE ??
?? NEWTITLE := 'CONVERT_SVA_TO_PFTE_P', EJECT ??
{ PURPOSE:
{   This procedure returns a pointer to the page frame table entry for a specified pva.

  PROCEDURE convert_sva_to_pfte_p
    (    sva: ost$system_virtual_address;
     VAR pfte_p: ^mmt$page_frame_table_entry;
     VAR status: syt$monitor_status);

    VAR
      count: 1 .. 32,
      hash_sva_param2: integer, {Kludge for compiler bug
      pfti: mmt$page_frame_index,
      pti: ost$page_table_index;


{  Hash the sva into the page table.  The hash result is received in an
{  integer temporary and copied to pti (compiler-bug workaround noted by
{  the original author).  A failed hash means the page is not in the
{  page table.

    #HASH_SVA (sva, hash_sva_param2, count, status.normal);
    pti := hash_sva_param2; {Kludge for compiler bug
    IF status.normal = FALSE THEN
      status.condition := mme$page_not_in_page_table;
      RETURN; {----->
    IFEND;

{  Convert the page table entry's real memory address to a page frame
{  table index; rma is scaled by 512 before dividing by the page size.
{  The entry's valid bit becomes the returned status.

    pfti := (mmv$pt_p^ [pti].rma * 512) DIV osv$page_size;
    status.normal := mmv$pt_p^ [pti].v;
    IF status.normal = FALSE THEN
      status.condition := mme$not_valid_in_page_table;
    IFEND;

{  NOTE(review): pfte_p is returned even when the valid bit is off;
{  callers must check status before using the entry.

    pfte_p := ^mmv$pft_p^ [pfti];

  PROCEND convert_sva_to_pfte_p;
?? OLDTITLE ??
?? NEWTITLE := 'LOCK_PAGES', EJECT ??

{  This procedure processes the lock pages monitor function.

  PROCEDURE lock_pages
    (    sva: ost$system_virtual_address;
         length: ost$byte_count;
     VAR status: syt$monitor_status);

    VAR
      initial_lock_offset: ost$segment_offset,
      lock_length: integer,
      lock_sva: ost$system_virtual_address,
      pfte_p: ^mmt$page_frame_table_entry,
      unlock_status: syt$monitor_status;

{  A zero length request is trivially successful.

    status.normal := TRUE;
    IF length = 0 THEN
      RETURN; {----->
    IFEND;

{  Reject a request whose byte range would extend past the maximum
{  segment offset.

    IF ((length + sva.offset) > UPPERVALUE (ost$segment_offset)) THEN
      mtp$set_status_abnormal ('MM', mme$lock_unlock_invalid_length, status);
      RETURN; {----->
    IFEND;

{  Round the starting sva down to a page boundary and extend the length
{  by the amount of the round-down so the same byte range is covered.

    lock_length := length + (sva.offset MOD osv$page_size);
    lock_sva := sva;
    lock_sva.offset := lock_sva.offset - (lock_sva.offset MOD osv$page_size);
    initial_lock_offset := lock_sva.offset;


{  Lock page frame table entries until all specified entries locked or encounter a page frame
{  table entry that is already locked.  If this happens unlock all pages locked so far and
{  return error status.

  /lock_pages_loop/
    WHILE TRUE DO
      convert_sva_to_pfte_p (lock_sva, pfte_p, status);
      IF status.normal = FALSE THEN
{  Page not in memory - back out the pages locked so far.  The status
{  from the failed conversion is returned; the unlock status is discarded.
        unlock_pages (sva, lock_sva.offset - initial_lock_offset, unlock_status);
        RETURN; {----->
      IFEND;

      IF pfte_p^.locked_page <> mmc$lp_not_locked THEN
{  Page already locked - back out, then overwrite status with the
{  page_already_locked condition.
        unlock_pages (sva, lock_sva.offset - initial_lock_offset, status);
        mtp$set_status_abnormal ('MM', mme$page_already_locked, status);
        RETURN; {----->
      IFEND;

      pfte_p^.locked_page := mmc$lp_aging_lock;
      lock_length := lock_length - osv$page_size;
      IF lock_length <= 0 THEN
        EXIT /lock_pages_loop/; {----->
      IFEND;

      lock_sva.offset := lock_sva.offset + osv$page_size;
    WHILEND /lock_pages_loop/;

  PROCEND lock_pages;
?? OLDTITLE ??
?? NEWTITLE := 'UNLOCK_PAGES', EJECT ??

{  This procedure processes the unlock pages monitor function.

  PROCEDURE unlock_pages
    (    sva: ost$system_virtual_address;
         length: ost$byte_count;
     VAR status: syt$monitor_status);

    VAR
      unlock_length: integer,
      unlock_sva: ost$system_virtual_address,
      pfte_p: ^mmt$page_frame_table_entry;

{  A zero length request is trivially successful.

    status.normal := TRUE;
    IF length = 0 THEN
      RETURN; {----->
    IFEND;

{  Reject a request whose byte range would extend past the maximum
{  segment offset.

    IF ((sva.offset + length) > UPPERVALUE (ost$segment_offset)) THEN
      mtp$set_status_abnormal ('MM', mme$lock_unlock_invalid_length, status);
      RETURN; {----->
    IFEND;

{  Round the starting sva down to a page boundary and extend the length
{  accordingly, as in lock_pages.

    unlock_sva := sva;
    unlock_sva.offset := unlock_sva.offset - (unlock_sva.offset MOD osv$page_size);
    unlock_length := length + (sva.offset MOD osv$page_size);


{  Unlock page frame table entries specified by pva and length.
{  Only entries currently holding the aging lock are changed; pages that
{  are absent from memory or locked some other way are silently skipped.

  /unlock_page_frames/
    WHILE TRUE DO
      convert_sva_to_pfte_p (unlock_sva, pfte_p, status);
      IF (status.normal = TRUE) AND (pfte_p^.locked_page = mmc$lp_aging_lock) THEN
        pfte_p^.locked_page := mmc$lp_not_locked;
      IFEND;

      unlock_length := unlock_length - osv$page_size;
      IF unlock_length <= 0 THEN
        EXIT /unlock_page_frames/; {----->
      IFEND;

      unlock_sva.offset := unlock_sva.offset + osv$page_size;
    WHILEND /unlock_page_frames/;

{  Always return normal status; any abnormal status left by a failed
{  conversion above is deliberately overwritten here.

    status.normal := TRUE;

  PROCEND unlock_pages;
?? OLDTITLE ??
?? NEWTITLE := 'MMP$MTR_CHANGE_SEGMENT_TABLE', EJECT ??
*copy mmh$mtr_change_segment_table

  PROCEDURE [XDCL] mmp$mtr_change_segment_table
    (VAR request_block: mmt$rb_change_segment_table;
         cst_p: ^ost$cpu_state_table);

    VAR
      new_sdt_p: ^cell,
      new_sdtx_p: ^cell,
      old_sdt_p: mmt$max_sdt_p,
      old_sdtx_p: mmt$max_sdtx_p,
      sdt_rma: integer,
      sdt_entries: integer,
      xcb_p: ^ost$execution_control_block;

    request_block.status.normal := TRUE;
    xcb_p := cst_p^.xcb_p;

{  Convert SDT and SDTX pointer from pointers relative to job's address space to pointers
{  relative to monitor's address space.

    new_sdt_p := #ADDRESS (1, #SEGMENT (xcb_p), request_block.new_sdt_offset);
    mmp$get_max_sdt_sdtx_pointer (xcb_p, old_sdt_p, old_sdtx_p);

{  Copy the old SDT into the new location; each SDT entry occupies 8 bytes.

    sdt_entries := xcb_p^.xp.segment_table_length + 1;
    i#move (old_sdt_p, new_sdt_p, sdt_entries * 8);

{ The only time the new_sdtx_offset should equal zero is when this request
{ is called during job recovery.

    IF request_block.new_sdtx_offset <> 0 THEN
      new_sdtx_p := #ADDRESS (1, #SEGMENT (xcb_p), request_block.new_sdtx_offset);
      i#move (old_sdtx_p, new_sdtx_p, sdt_entries * #SIZE (mmt$segment_descriptor_extended));
      xcb_p^.sdtx_offset := request_block.new_sdtx_offset;
    IFEND;

{  Update the segment table address and length in the job's exchange package.
{  The real memory address of the new SDT is split into upper and lower
{  parts at 2**16 (10000(16)) for the two address fields.

    #real_memory_address (new_sdt_p, sdt_rma);
    xcb_p^.xp.segment_table_address_1 := sdt_rma DIV 10000(16);
    xcb_p^.xp.segment_table_address_2 := sdt_rma MOD 10000(16);
    xcb_p^.sdt_offset := request_block.new_sdt_offset;
    xcb_p^.xp.segment_table_length := request_block.new_sdt_length;

  PROCEND mmp$mtr_change_segment_table;
?? OLDTITLE ??
?? NEWTITLE := 'MMP$MTR_FETCH_OFFSET_MOD_PAGES', EJECT ??

  PROCEDURE [XDCL] mmp$mtr_fetch_offset_mod_pages
    (VAR rb: mmt$rb_fetch_offset_mod_pages;
         cst_p: ^ost$cpu_state_table);

*copy mmh$mtr_fetch_offset_mod_pages

{  A variant record used to view one pointer value two ways: as a typed
{  array pointer and as a raw pva whose segment field can be rewritten.
{  This relocates the caller-supplied offset_list pointer into the
{  request block's own segment before it is dereferenced.

    TYPE
      array_ptr_type = record
        case b: 0 .. 1 of
        = 0 =
          array_p: ^array [1 .. * ] of ost$segment_offset,
        = 1 =
          array_pva: ost$pva,
        casend,
      recend;

    VAR
      asid: ost$asid,
      aste_p: ^mmt$active_segment_table_entry,
      asti: mmt$ast_index,
      change_array_ptr: array_ptr_type,
      converted_array_p: ^array [1 .. * ] of ost$segment_offset,
      dfd_p: ^dmt$disk_file_descriptor,
      offset_list_index: integer,
      fde_p: gft$file_desc_entry_p,
      p_fau: ^dmt$file_allocation_unit,
      pfti_array_size: integer,
      pft_index: mmt$page_frame_index,
      sva: ost$system_virtual_address;

{  Initialize status.

    rb.status.normal := TRUE;

{  Convert PVA to SVA and get AST pointer.  An asti of zero means the
{  segment has no active pages; return zero offsets.

    gfp$mtr_get_locked_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
    mmp$get_verify_asti_in_fde (fde_p, rb.sfid, cst_p^.ijl_ordinal, asti);
    IF asti <> 0 THEN
      mmp$asid (asti, asid);
      sva.asid := asid;
      aste_p := ^mmv$ast_p^ [asti];
    ELSE
      rb.offsets_returned := 0;
      RETURN; {----->
    IFEND;

    sva.offset := 0;
{  Locate all possibly MODIFIED pages for segment and get first pfti.

    mmp$initialize_find_next_pfti (sva, 7ffffff0(16), include_partial_pages, psc_all_except_avail, aste_p,
          pft_index);

{  Ensure that caller's array is large enough to hold all the offsets.
{  When it is not, the required size is passed back in offsets_returned
{  and the request returns without filling the array.

    mmp$fetch_pfti_array_size (pfti_array_size);

    IF pfti_array_size > rb.offsets_returned THEN
      rb.offsets_returned := pfti_array_size;
      RETURN; {----->
    IFEND;

{  Relocate the offset_list pointer into the request block's segment
{  (see array_ptr_type above).

    change_array_ptr.array_p := rb.offset_list;
    change_array_ptr.array_pva.seg := #SEGMENT (^rb);
    converted_array_p := change_array_ptr.array_p;

{  Move all offsets to array supplied by caller.

    offset_list_index := 0;

{  dfd_p is needed only for the mass storage case below.

    IF fde_p^.media = gfc$fm_mass_storage_file THEN
      dmp$get_disk_file_descriptor_p (fde_p, dfd_p);
    IFEND;

{  A page's offset is returned when its page table modified bit is set,
{  or - when the caller asked for unallocated offsets - when the page
{  belongs to a transient segment, or to a mass storage file whose
{  allocation unit is missing or free.

    WHILE (pft_index <> 0) AND (offset_list_index < rb.offsets_returned) DO
      IF mmv$pt_p^ [mmv$pft_p^ [pft_index].pti].m THEN
        offset_list_index := offset_list_index + 1;
        converted_array_p^ [offset_list_index] := mmv$pft_p^ [pft_index].sva.offset;
      ELSEIF (rb.return_unallocated_offsets) AND (fde_p^.media = gfc$fm_transient_segment) THEN
        offset_list_index := offset_list_index + 1;
        converted_array_p^ [offset_list_index] := mmv$pft_p^ [pft_index].sva.offset;
      ELSEIF (rb.return_unallocated_offsets) AND (fde_p^.media = gfc$fm_mass_storage_file) THEN
        dmp$get_fau_entry (dfd_p, mmv$pft_p^ [pft_index].sva.offset, p_fau);
        IF (p_fau = NIL) OR (p_fau^.state = dmc$fau_free) THEN
          offset_list_index := offset_list_index + 1;
          converted_array_p^ [offset_list_index] := mmv$pft_p^ [pft_index].sva.offset;
        IFEND;
      IFEND;
      mmp$find_next_pfti (pft_index);
    WHILEND;

    rb.offsets_returned := offset_list_index;

  PROCEND mmp$mtr_fetch_offset_mod_pages;
?? OLDTITLE ??
?? NEWTITLE := 'MMP$MTR_FETCH_PVA_UNWRITTEN_PGS', EJECT ??

  PROCEDURE [XDCL] mmp$mtr_fetch_pva_unwritten_pgs
    (VAR rb: mmt$rb_fetch_pva_unwritten_pgs;
         cst_p: ^ost$cpu_state_table);

*copy mmh$mtr_fetch_pva_unwritten_pgs

    VAR
      offset_list_index: 0 .. 6,
      pft_entry: mmt$page_frame_table_entry,
      pft_index: mmt$page_frame_index,
      sva: ost$system_virtual_address;


    mmp$xtask_pva_to_sva (rb.pva, sva, rb.status);
    IF rb.status.normal = FALSE THEN
      RETURN; {----->
    IFEND;

{  Start the scan at the tail of the wired page queue; the scan follows
{  bkw links throughout.

    pft_index := mmv$gpql [mmc$pq_wired].pqle.link.bkw;
    IF rb.subsequent_request_for_same_pva = TRUE THEN

{  Continuation of an earlier (overflowed) request: reposition the scan
{  at the entry matching next_offset_to_return.  Falling out of the
{  inner loop without a match is an error.

    /search_pft_for_offset/
      BEGIN

      /find_starting_pft_entry/
        WHILE pft_index <> 0 DO
          pft_entry := mmv$pft_p^ [pft_index];
          IF (sva.asid = pft_entry.sva.asid) AND (rb.next_offset_to_return = pft_entry.sva.offset) THEN
            EXIT /search_pft_for_offset/; {----->
          IFEND;

          pft_index := mmv$pft_p^ [pft_index].link.bkw;
        WHILEND /find_starting_pft_entry/;
        mtp$set_status_abnormal ('MM', mme$no_matching_offset, rb.status);
        RETURN; {----->
      END /search_pft_for_offset/;
    IFEND;

    rb.offset_list_overflow := FALSE;

{  Choose the starting offset: zero for a fresh scan, otherwise the
{  first page boundary past the caller's pva.

    IF rb.starting_with_first_page = TRUE THEN
      sva.offset := 0;
    ELSE
      sva.offset := ((sva.offset DIV osv$page_size) + 1) * osv$page_size;
    IFEND;

    offset_list_index := 0;


{  Search wired queue of page frame table entries for entry with matching ASID and an
{  offset that is >=  starting offset.
{  At most 6 offsets fit in the request block; on overflow record the
{  next offset so the caller can continue with a subsequent request.

  /search_pft/
    WHILE pft_index <> 0 DO
      pft_entry := mmv$pft_p^ [pft_index];
      IF (sva.asid = pft_entry.sva.asid) AND (sva.offset <= pft_entry.sva.offset) THEN
        IF offset_list_index >= 6 THEN
          rb.offset_list_overflow := TRUE;
          rb.next_offset_to_return := pft_entry.sva.offset;
          rb.offsets_returned := 6;
          RETURN; {----->
        ELSE
          offset_list_index := offset_list_index + 1;
          rb.offset_list [offset_list_index] := pft_entry.sva.offset;
        IFEND;
      IFEND;
      pft_index := pft_entry.link.bkw;
    WHILEND /search_pft/;

    rb.offsets_returned := offset_list_index;

  PROCEND mmp$mtr_fetch_pva_unwritten_pgs;
?? OLDTITLE ??
?? NEWTITLE := 'MMP$MTR_LOCK_UNLOCK_PAGES', EJECT ??

  PROCEDURE [XDCL] mmp$mtr_lock_unlock_pages
    (VAR rb: mmt$rb_lock_unlock_pages;
         cst_p: ^ost$cpu_state_table);

*copy mmh$mtr_lock_unlock_pages

    VAR
      converted_sva: ost$system_virtual_address;

{  Convert the caller's pva; any conversion failure is returned directly.

    mmp$xtask_pva_to_sva (rb.pva, converted_sva, rb.status);
    IF rb.status.normal = FALSE THEN
      RETURN; {----->
    IFEND;

{  Dispatch on request code and lock page type.  Only the aging lock is
{  supported; every other combination is an invalid request.

    IF rb.reqcode = syc$rc_lock_pages THEN
      IF rb.lock_page_type = mmc$lp_aging_lock THEN
        lock_pages (converted_sva, rb.length, rb.status);
      ELSE
        mtp$set_status_abnormal ('MM', mme$invalid_request, rb.status);
      IFEND;
    ELSEIF rb.reqcode = syc$rc_unlock_pages THEN
      IF rb.lock_page_type = mmc$lp_aging_lock THEN
        unlock_pages (converted_sva, rb.length, rb.status);
      ELSE
        mtp$set_status_abnormal ('MM', mme$invalid_request, rb.status);
      IFEND;
    ELSE
      mtp$set_status_abnormal ('MM', mme$invalid_request, rb.status);
    IFEND;

  PROCEND mmp$mtr_lock_unlock_pages;
?? OLDTITLE ??
?? NEWTITLE := 'MMP$MTR_SET_GET_SEGMENT_LENGTH', EJECT ??

  PROCEDURE [XDCL] mmp$mtr_set_get_segment_length
    (VAR request_block: mmt$rb_set_get_segment_length;
         cst_p: ^ost$cpu_state_table);

*copy mmh$mtr_set_get_segment_length

    VAR
      asid: ost$asid,
      asti: mmt$ast_index,
      aste_p: ^mmt$active_segment_table_entry,
      fde_p: gft$locked_file_desc_entry_p,
      ijl_ordinal: jmt$ijl_ordinal,
      new_segment_length: integer,
      old_eoi_state: mmt$eoi_state,
      old_segment_length: integer,
      page_count_freed: integer,
      sfid: gft$system_file_identifier,
      sva: ost$system_virtual_address;


    fde_p := gfp$mtr_convert_job_mode_fde_p (request_block.fde_p, cst_p);

    CASE request_block.subfunction_code OF

{  GET: for a stack segment (stack_for_ring <> 0) the length depends on
{  the caller's ring relative to the stack's ring; otherwise return the
{  fde's eoi byte address, fixing up an uncertain eoi first.

    = mmc$sf_get_segment_length_fde_p =
      IF fde_p^.stack_for_ring <> 0 THEN
        IF cst_p^.xcb_p^.xp.p_register.pva.ring > fde_p^.stack_for_ring THEN
          request_block.segment_length := 0;
        ELSEIF cst_p^.xcb_p^.xp.p_register.pva.ring = fde_p^.stack_for_ring THEN
          request_block.segment_length := #OFFSET (cst_p^.xcb_p^.xp.a0_dynamic_space_pointer);
        ELSE
          request_block.segment_length := cst_p^.xcb_p^.xp.tos_registers [fde_p^.stack_for_ring].pva.offset;
        IFEND;
      ELSE
        IF fde_p^.eoi_state = mmc$eoi_uncertain THEN
          fixup_chapter_length (fde_p);
        IFEND;
        request_block.segment_length := fde_p^.eoi_byte_address;
      IFEND;

{  SET: record the old eoi, install the new one as actual, then free any
{  in-memory pages beyond the new eoi when the segment shrank by at
{  least a page or the old eoi was uncertain.

    = mmc$sf_set_segment_length_fde_p =
      old_segment_length := fde_p^.eoi_byte_address;
      old_eoi_state := fde_p^.eoi_state;
      new_segment_length := request_block.segment_length;

      fde_p^.eoi_byte_address := new_segment_length;
      fde_p^.eoi_state := mmc$eoi_actual;
      fde_p^.flags.eoi_modified := TRUE;

      IF (old_eoi_state = mmc$eoi_uncertain) OR ((old_segment_length DIV osv$page_size) >
            (new_segment_length DIV osv$page_size)) THEN
        gfp$mtr_get_sfid_from_fde_p (fde_p, sfid, ijl_ordinal);
        mmp$get_verify_asti_in_fde (fde_p, sfid, ijl_ordinal, asti);
        IF asti <> 0 THEN
          aste_p := ^mmv$ast_p^ [asti];
          IF aste_p^.pages_in_memory > 0 THEN
{  NOTE(review): fde_p^.asti is used here rather than the verified local
{  asti - presumably identical after mmp$get_verify_asti_in_fde; confirm.
            mmp$asid (fde_p^.asti, asid);
            sva.asid := asid;
            sva.offset := new_segment_length;
            mmp$mm_free_pages (sva, 7fffffff(16), aste_p, FALSE, page_count_freed);
          IFEND;
        IFEND;
      IFEND;

    ELSE
      mtp$error_stop ('MM - Bad option on get_segment length');
    CASEND;

  PROCEND mmp$mtr_set_get_segment_length;
?? OLDTITLE ??
?? NEWTITLE := '  fixup_chapter_length', EJECT ??

{ This procedure will find the unused pages assigned by mmp$page_pull when the
{ task page faulted for a "new page", release them, and set eoi to the end of the
{ last used page.

  PROCEDURE [INLINE] fixup_chapter_length
    (    fde_p: gft$locked_file_desc_entry_p);

    VAR
      asid: ost$asid,
      aste_p: ^mmt$active_segment_table_entry,
      asti: mmt$ast_index,
      count: 1 .. 32,
      eoi: ost$segment_length,
      found: boolean,
      i: 1 .. 10,
      ijl_ordinal: jmt$ijl_ordinal,
      max_eoi: ost$segment_length,
      offset: integer,
      page_count_freed: integer,
      pages_freed: integer,
      pfti_array: array [1 .. 10] of mmt$page_frame_index,
      pti: integer,
      sfid: gft$system_file_identifier,
      sva: ost$system_virtual_address;


{ Check if the asid in the fde is still valid.  If not then there are no pages
{ of the file in memory and EOI will be assumed correct.

    gfp$mtr_get_sfid_from_fde_p (fde_p, sfid, ijl_ordinal);
    mmp$get_verify_asti_in_fde (fde_p, sfid, ijl_ordinal, asti);
    IF asti = 0 THEN
      RETURN; {----->
    IFEND;
    mmp$asid (asti, asid);

{ Start searching for unused pages at the highest page assigned and work backwards,
{ stopping at the first modified page.  Eoi is currently set at the end of the page
{ that faulted.  The number 16384 is an arbitrary number that only must be less than
{ or equal to the minimum allocation unit size.  It was used to determine the number
{ of extra pages to assign.

    eoi := fde_p^.eoi_byte_address;
    sva.asid := asid;

    offset := eoi + 16384 - osv$page_size;
    max_eoi := offset;
    pages_freed := 0;

{ Walk down one page at a time.  An unmodified, in-memory page is marked
{ invalid and its frame index is remembered for release below.  A modified
{ page ends the scan with offset restored to just past that page.
{ NOTE(review): pfti_array holds at most 10 entries - this assumes the
{ loop frees no more than 10 pages (about 16384/page size - 1 iterations);
{ confirm for the smallest supported page size.

  /find_eoi/
    WHILE offset > eoi DO
      offset := offset - osv$page_size;
      IF offset < osc$maximum_offset THEN
        sva.offset := offset;
        #HASH_SVA (sva, pti, count, found);
        IF found THEN
          IF mmv$pt_p^ [pti].m THEN
            offset := offset + osv$page_size;
            EXIT /find_eoi/; {----->
          IFEND;
          pages_freed := pages_freed + 1;
          mmv$pt_p^ [pti].v := FALSE;
          pfti_array [pages_freed] := (mmv$pt_p^ [pti].rma * 512) DIV osv$page_size;
        IFEND;
      IFEND;
    WHILEND /find_eoi/;

{ Eoi is now rounded to the end of the last used page.

    fde_p^.eoi_byte_address := offset;
    fde_p^.eoi_state := mmc$eoi_rounded;

{ Release the invalidated page frames: purge the cache maps, delete each
{ page table entry, and return each frame to the free queue.

    IF pages_freed > 0 THEN
      mmp$purge_all_cache_map;
      FOR i := 1 TO pages_freed DO
        mmp$delete_pt_entry (pfti_array [i], TRUE);
        mmp$relink_page_frame (pfti_array [i], mmc$pq_free)
      FOREND;
    IFEND;

  PROCEND fixup_chapter_length;
?? OLDTITLE ??
?? NEWTITLE := 'MMP$MTR_LOCK_UNLOCK_SEGMENT', EJECT ??

{------------------------------------------------------------------------------------------}
{This procedure processes the following requests:
{   mmp$lock_segment
{   mmp$unlock_segment
{------------------------------------------------------------------------------------------}


  PROCEDURE [XDCL] mmp$mtr_lock_unlock_segment
    (VAR rb: mmt$rb_lock_unlock_segment;
         cst_p: ^ost$cpu_state_table);

{  Maps page disposition (protected write / write) to the I/O function
{  used when flushing modified pages at unlock.

    VAR
      iotype: [READ, STATIC] array [mmc$lus_protected_write .. mmc$lus_write] of iot$io_function :=
            [ioc$write_locked_page, ioc$write_page];

    VAR
      aste_p: ^mmt$active_segment_table_entry,
      ste_p: ^mmt$segment_descriptor,
      stxe_p: ^mmt$segment_descriptor_extended,
      taskid: ost$global_task_id,
      xcb_p: ^ost$execution_control_block,
      page_status: gft$page_status,
      qrb_p: ^mmt$rb_lock_unlock_segment,
      fde_entry_p: gft$locked_file_desc_entry_p,
      lock_info_p: ^gft$segment_lock_info,
      dequeue_tasks: boolean,
      sdt_p: mmt$max_sdt_p,
      sdtx_p: mmt$max_sdtx_p,
      count: integer,
      sva: ost$system_virtual_address,
      ijle_p: ^jmt$initiated_job_list_entry,
      io_id: mmt$io_identifier,
      io_count: mmt$active_io_count,
      io_already_active: boolean,
      last_written_pfti: mmt$page_frame_index,
      wmp_status: mmt$write_modified_pages_status;


{  Verify the caller's pva for read-or-write access; all work below is
{  skipped when verification fails.

    mmp$verify_pva (^rb.pva, mmc$sat_read_or_write, rb.status);

    IF rb.status.normal THEN
      mmp$convert_pva (rb.pva, cst_p, sva, fde_entry_p, aste_p, ste_p, stxe_p);
      lock_info_p := ^fde_entry_p^.segment_lock;
      xcb_p := cst_p^.xcb_p;

      CASE rb.request OF
      = mmc$lus_lock_segment =

{ Determine the status/location of the page.

        CASE fde_entry_p^.media OF
        = gfc$fm_transient_segment =
          page_status := gfc$ps_page_doesnt_exist;
        = gfc$fm_mass_storage_file =
          IF dmv$number_unavailable_volumes > 0 THEN
            dmp$fetch_page_status (fde_entry_p, sva.offset, stxe_p^.file_limits_enforced, FALSE
                  {allocate_if_new} , page_status);
          ELSE
            page_status := gfc$ps_page_doesnt_exist;
          IFEND;
        = gfc$fm_served_file =
          dfp$fetch_page_status (fde_entry_p, sva.offset, page_status);
        ELSE
          mtp$error_stop ('MM - bad FDE.MEDIA');
        CASEND;

        IF page_status = gfc$ps_volume_unavailable THEN
          mtp$set_status_abnormal ('MM', mme$volume_unavailable, rb.status);
          RETURN; {----->
        ELSEIF page_status = gfc$ps_server_terminated THEN
          mtp$set_status_abnormal ('DF', dfe$server_has_terminated, rb.status);
          RETURN; {----->
        IFEND;

{  Grant the lock when compatible with the current state: a read lock
{  needs no writer, an empty wait queue and an unsaturated reader count;
{  a write lock needs no readers and no writer.  The lock-activity kind
{  and the recorded lock state depend on whether the caller is a
{  catalog segment or runs above ring 3.

        IF (stxe_p^.segment_lock <> mmc$lss_none) AND (stxe_p^.segment_lock <> mmc$lss_queued_for_lock_r3) AND
              (stxe_p^.segment_lock <> mmc$lss_queued_for_lock_user) THEN
          mtp$set_status_abnormal ('MM', mme$segment_locked_by_task, rb.status);
        ELSEIF (rb.access = mmc$lus_lock_for_read) AND NOT (lock_info_p^.locked_for_write) AND
              (lock_info_p^.task_queue.head = 0) AND (lock_info_p^.locked_for_read <
              UPPERVALUE (lock_info_p^.locked_for_read)) THEN
          lock_info_p^.locked_for_read := lock_info_p^.locked_for_read + 1;
          IF rb.catalog_segment OR (xcb_p^.xp.p_register.pva.ring > 3) THEN
            tmp$mtr_begin_lock_activity (xcb_p, osc$subsystem_lock_activity);
            stxe_p^.segment_lock := mmc$lss_lock_for_read_user;
          ELSE
            tmp$mtr_begin_lock_activity (xcb_p, osc$system_lock_activity);
            stxe_p^.segment_lock := mmc$lss_lock_for_read_r3;
          IFEND;
        ELSEIF (rb.access = mmc$lus_lock_for_write) AND (lock_info_p^.locked_for_read = 0) AND
              NOT lock_info_p^.locked_for_write THEN
          lock_info_p^.locked_for_write := TRUE;
          IF cst_p^.ijle_p^.override_job_working_set_max < UPPERVALUE (cst_p^.ijle_p^.
                override_job_working_set_max) THEN
            cst_p^.ijle_p^.override_job_working_set_max := cst_p^.ijle_p^.override_job_working_set_max + 1;
          IFEND;
          IF rb.catalog_segment OR (xcb_p^.xp.p_register.pva.ring > 3) THEN
            tmp$mtr_begin_lock_activity (xcb_p, osc$subsystem_lock_activity);
            stxe_p^.segment_lock := mmc$lss_lock_for_write_user;
          ELSE
            tmp$mtr_begin_lock_activity (xcb_p, osc$system_lock_activity);
            stxe_p^.segment_lock := mmc$lss_lock_for_write_r3;
          IFEND;
        ELSE

{  Lock not available: return abnormal status and, if the caller asked
{  to wait, queue the task on the segment's lock wait queue.

          mtp$set_status_abnormal ('MM', mme$segment_locked_another_task, rb.status);
          IF rb.wait = osc$wait THEN
            IF rb.catalog_segment OR (xcb_p^.xp.p_register.pva.ring > 3) THEN
              stxe_p^.segment_lock := mmc$lss_queued_for_lock_user;
            ELSE
              stxe_p^.segment_lock := mmc$lss_queued_for_lock_r3;
            IFEND;
            tmp$queue_task (cst_p^.taskid, tmc$ts_segment_lock_wait, lock_info_p^.task_queue);
          IFEND;
        IFEND;

      = mmc$lus_unlock_segment =
        IF stxe_p^.segment_lock = mmc$lss_none THEN
          mtp$set_status_abnormal ('MM', mme$segment_not_locked, rb.status);
        ELSEIF lock_info_p^.locked_for_write THEN

{  Releasing a write lock: first apply the requested page disposition
{  (write out, remove from working set, free, or nothing), then end the
{  lock activity and clear the writer state.

          sva.offset := 0;
          CASE rb.page_disposition OF
          = mmc$lus_write, mmc$lus_protected_write =
            io_id.specified := FALSE;
            io_id.io_function := iotype [rb.page_disposition];
            mmp$mm_write_modified_pages (sva, 7ffffff0(16), fde_entry_p, aste_p, iotype [rb.page_disposition],
                  rb.init_new_io, FALSE, io_id, io_count, io_already_active, last_written_pfti, wmp_status);
            mmp$process_wmp_status (wmp_status, last_written_pfti, rb.wait, rb.init_new_io, rb.status);
            IF ((wmp_status <> mmc$wmp_io_complete) AND (wmp_status <> mmc$wmp_io_active)) OR
                  ((wmp_status = mmc$wmp_io_active) AND (rb.wait = osc$wait)) THEN
              RETURN; {----->
            IFEND;
          = mmc$lus_remove_from_working_set =
            mmp$remove_pages_working_set (sva, 7ffffff0(16), aste_p, count);
          = mmc$lus_free =
            mmp$mm_free_pages (sva, 7ffffff0(16), aste_p, FALSE, count);
          = mmc$lus_none =
          ELSE
            mtp$set_status_abnormal ('MM', mme$invalid_request, rb.status);
            RETURN; {----->
          CASEND;
          IF stxe_p^.segment_lock = mmc$lss_lock_for_write_r3 THEN
            tmp$mtr_end_lock_activity (cst_p, osc$system_lock_activity, xcb_p);
          ELSE
            tmp$mtr_end_lock_activity (cst_p, osc$subsystem_lock_activity, xcb_p);
          IFEND;
          lock_info_p^.locked_for_write := FALSE;

          IF cst_p^.ijle_p^.override_job_working_set_max > 0 THEN
            cst_p^.ijle_p^.override_job_working_set_max := cst_p^.ijle_p^.override_job_working_set_max - 1;
          IFEND;
        ELSE

{  Releasing a read lock: end the lock activity and decrement the
{  reader count.

          IF stxe_p^.segment_lock = mmc$lss_lock_for_read_r3 THEN
            tmp$mtr_end_lock_activity (cst_p, osc$system_lock_activity, xcb_p);
          ELSE
            tmp$mtr_end_lock_activity (cst_p, osc$subsystem_lock_activity, xcb_p);
          IFEND;
          lock_info_p^.locked_for_read := lock_info_p^.locked_for_read - 1;
        IFEND;
        stxe_p^.segment_lock := mmc$lss_none;

{  Waiters may be granted the lock when no writer holds it and either
{  no readers remain or one more reader can still be added.

        dequeue_tasks := NOT lock_info_p^.locked_for_write AND
              ((lock_info_p^.locked_for_read = 0) OR (lock_info_p^.locked_for_read =
              UPPERVALUE (lock_info_p^.locked_for_read) - 1));

{ Set the PTL lock while scanning the segment_lock task_queue.  Tmp$set_task_ready, which runs
{ asynchronously--NOT under the master monitor interlock, can be removing tasks from the queue
{ on the other processor.

{NOTE: From here on, XCB_P is no longer the CST_P^.XCB_P, but the XCB_P of the task to dequeue!

        tmp$set_lock (tmv$ptl_lock{, mtc$abandon});
        WHILE dequeue_tasks AND (lock_info_p^.task_queue.head <> 0) DO
          tmp$get_taskid_from_task_queue (lock_info_p^.task_queue, taskid);
          tmp$get_xcb_p (taskid, xcb_p, ijle_p);
          IF xcb_p <> NIL THEN

{  The waiter's request block is recovered from its saved X0 register;
{  its stxe is located through its own SDTX, not the caller's.

            qrb_p := #LOC (xcb_p^.xp.x_registers [0]);
            mmp$get_max_sdt_sdtx_pointer (xcb_p, sdt_p, sdtx_p);
            stxe_p := ^sdtx_p^.sdtx_table [#SEGMENT (qrb_p^.pva)];
            IF qrb_p^.access = mmc$lus_lock_for_read THEN
              lock_info_p^.locked_for_read := lock_info_p^.locked_for_read + 1;
              IF stxe_p^.segment_lock = mmc$lss_queued_for_lock_r3 THEN
                tmp$mtr_begin_lock_activity (xcb_p, osc$system_lock_activity);
                stxe_p^.segment_lock := mmc$lss_lock_for_read_r3;
              ELSE
                tmp$mtr_begin_lock_activity (xcb_p, osc$subsystem_lock_activity);
                stxe_p^.segment_lock := mmc$lss_lock_for_read_user;
              IFEND;
              IF lock_info_p^.locked_for_read = UPPERVALUE (lock_info_p^.locked_for_read) THEN
                dequeue_tasks := FALSE;
              IFEND;
            ELSEIF lock_info_p^.locked_for_read = 0 THEN
              lock_info_p^.locked_for_write := TRUE;
              IF stxe_p^.segment_lock = mmc$lss_queued_for_lock_r3 THEN
                tmp$mtr_begin_lock_activity (xcb_p, osc$system_lock_activity);
                stxe_p^.segment_lock := mmc$lss_lock_for_write_r3;
              ELSE
                tmp$mtr_begin_lock_activity (xcb_p, osc$subsystem_lock_activity);
                stxe_p^.segment_lock := mmc$lss_lock_for_write_user;
              IFEND;
              dequeue_tasks := FALSE;
            ELSE

{  A writer is at the head of the queue but readers remain: stop
{  dequeuing entirely (the waiter stays queued).

              jmp$unlock_ajl (ijle_p);
              tmp$clear_lock (tmv$ptl_lock);
              RETURN; {----->
            IFEND;
            qrb_p^.status.normal := TRUE;
            jmp$unlock_ajl (ijle_p);
          IFEND;
          tmp$dequeue_task (lock_info_p^.task_queue, taskid);
        WHILEND;
        tmp$clear_lock (tmv$ptl_lock);
      ELSE
        mtp$set_status_abnormal ('MM', mme$invalid_request, rb.status);
      CASEND;
    IFEND;

  PROCEND mmp$mtr_lock_unlock_segment;
?? OLDTITLE ??
?? NEWTITLE := 'mmp$mtr_wait_io_completion', EJECT ??
*copyc mmh$wait_io_completion

  PROCEDURE [XDCL] mmp$mtr_wait_io_completion
    (VAR rb: mmt$rb_wait_io_completion;
         cst_p: ^ost$cpu_state_table);

    VAR
      count: 1 .. 32,
      found: boolean,
      pti: integer,
      pfti: mmt$page_frame_index,
      sva: ost$system_virtual_address;

{  Verify and convert the caller's pva, then locate its page frame.  If
{  I/O is still active on the frame, queue the task on that frame's task
{  queue (clearing page_wait_info.pva first).  When the page is not in
{  the page table, or no I/O is active, return with the status from the
{  verify/convert steps.

    mmp$verify_pva (^rb.pva, mmc$sat_read_or_write, rb.status);
    IF rb.status.normal THEN
      mmp$xtask_pva_to_sva (rb.pva, sva, rb.status);
      IF rb.status.normal THEN
        #HASH_SVA (sva, pti, count, found);
        IF found THEN
          pfti := (mmv$pt_p^ [pti].rma * 512) DIV osv$page_size;
          IF mmv$pft_p^ [pfti].active_io_count <> 0 THEN
            cst_p^.xcb_p^.page_wait_info.pva := NIL;
            tmp$queue_task (cst_p^.taskid, tmc$ts_io_wait_queued, mmv$pft_p^ [pfti].task_queue);
          IFEND;
        IFEND;
      IFEND;
    IFEND;
  PROCEND mmp$mtr_wait_io_completion;
?? OLDTITLE ??
?? NEWTITLE := 'MMP$MODIFY_PAGES', EJECT ??

  PROCEDURE [XDCL] mmp$modify_pages
    (    fde_p: gft$locked_file_desc_entry_p;
         offset: ost$segment_offset;
         length: ost$byte_count;
         set_modified_bit: boolean;
     VAR status: syt$monitor_status);


{  This procedure verifies that all pages of a given sva range
{  are in memory and optionally sets the modified bits.

{  This request is used only by dmp$reallocate_file_space to verify
{  that all the pages of an allocation unit being reallocated are in memory
{  and to cause them to be modified and therefore written to the new allocation
{  unit.

    VAR
      asid: ost$asid,
      asti: mmt$ast_index,
      ijlo: jmt$ijl_ordinal,
      lock_length: integer,
      sfid: gft$system_file_identifier,
      sva: ost$system_virtual_address,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index;


    status.normal := TRUE;

{  An asti of zero means no pages of the file are active in memory -
{  report the range as not in the page table.

    gfp$mtr_get_sfid_from_fde_p (fde_p, sfid, ijlo);
    mmp$get_verify_asti_in_fde (fde_p, sfid, ijlo, asti);
    IF asti = 0 THEN
      mtp$set_status_abnormal ('MM', mme$page_not_in_page_table, status);
      RETURN; {----->
    IFEND;
    mmp$asid (asti, asid);

{  Extend the length by the sub-page part of the starting offset so the
{  page-by-page loop below covers the whole byte range.  Note the loop
{  starts at the caller's offset, not a rounded-down one.

    lock_length := length + (offset MOD osv$page_size);
    sva.asid := asid;
    sva.offset := offset;

{  Visit each page of the range.  Every page must be present and valid;
{  when set_modified_bit is requested, mark it modified, clear any prior
{  I/O error so writes can be retried, and relink the frame onto its
{  segment's queue.  Otherwise the page must also be idle (no active I/O).

  /modify_pages_loop/
    WHILE TRUE DO
      convert_sva_to_pfte_p (sva, pfte_p, status);
      IF NOT status.normal THEN
        mtp$set_status_abnormal ('MM', mme$page_not_in_page_table, status);
        RETURN; {----->
      IFEND;

      IF NOT mmv$pt_p^ [pfte_p^.pti].v THEN
        {Return if page is not modify-able
        mtp$set_status_abnormal ('MM', mme$page_not_in_page_table, status);
        RETURN; {----->
      IFEND;

      IF set_modified_bit THEN
        mmv$pt_p^ [pfte_p^.pti].m := TRUE;
        {Allow retry of write operations
        pfte_p^.io_error := ioc$no_error;
        pfti := (mmv$pt_p^ [pfte_p^.pti].rma * 512) DIV osv$page_size;
        mmp$relink_page_frame (pfti, pfte_p^.aste_p^.queue_id);
      ELSEIF pfte_p^.active_io_count > 0 THEN
        {Return if page is not idle
        mtp$set_status_abnormal ('MM', mme$page_not_in_page_table, status);
        RETURN; {----->
      IFEND;

      lock_length := lock_length - osv$page_size;
      IF lock_length <= 0 THEN
        EXIT /modify_pages_loop/; {----->
      IFEND;

      sva.offset := sva.offset + osv$page_size;
    WHILEND /modify_pages_loop/;

  PROCEND mmp$modify_pages;
MODEND mmm$mtr_user_request_processor;
