?? RIGHT := 110 ??
MODULE mmm$monitor_request_processor;
?? RIGHT := 110 ??

{
{  PURPOSE: Memory_Manager
{     This module contains the monitor routines that are used to
{     manage physical memory and the page table.
{
?? NEWTITLE := '  Global Declarations Referenced by this module' ??
?? PUSH (LISTEXT := ON) ??
*copyc mmc$debug_constants
*copyc mmc$manage_memory_utility
*copyc mtc$job_fixed_segment
*copyc osc$processor_defined_registers
*copyc osc$purge_map_and_cache
*copyc tmc$signal_identifiers
*copyc mmd$segment_access_condition
*copyc dfe$error_condition_codes
*copyc mme$condition_codes
*copyc tme$monitor_mode_exceptions
*copyc dmt$chapter_info
*copyc dmt$mass_storage_error_codes
*copyc dmt$transfer_size
*copyc gft$file_desc_entry_p
*copyc gft$locked_file_desc_entry_p
*copyc iot$io_error
*copyc iot$io_function
*copyc iot$tape_collected_pp_response
*copyc jmt$dispatching_priority
*copyc jmt$initiated_job_list_entry
*copyc jmt$job_scheduler_event
*copyc mmt$active_segment_table
*copyc mmt$asid_list_page_table_full
*copyc mmt$buffer_descriptor
*copyc mmt$dynamic_aging_statistics
*copyc mmt$image_file
*copyc mmt$int_segment_access_fault
*copyc mmt$io_identifier
*copyc mmt$mainframe_wired_asid
*copyc mmt$make_pt_entry_status
*copyc mmt$page_frame_index
*copyc mmt$page_frame_queue_id
*copyc mmt$page_queue_list
*copyc mmt$page_selection_criteria
*copyc mmt$pfti_array
*copyc mmt$pt_full_status
*copyc mmt$rb_advise
*copyc mmt$rb_free_flush
*copyc mmt$rb_lock_ring_1_stack
*copyc mmt$rb_memory_manager_io
*copyc mmt$rb_ring1_segment_request
*copyc mmt$rb_ring1_server_seg_request
*copyc mmt$rb_segment_request
*copyc mmt$rcv_memory_mgr
*copyc mmt$rma_list
*copyc mmt$segment_access_rights
*copyc mmt$selected_page_fault_signal
*copyc mmt$update_eoi_reason
*copyc mmt$write_modified_pages_status
*copyc ost$cpu_state_table
*copyc ost$heap
*copyc ost$segment_access_control
*copyc sft$file_space_limit_kind
*copyc syt$monitor_flag
?? POP ??
*copyc dfi$monitor_display
*copyc dmp$deallocate_file_space
*copyc dpp$convert_int_to_str_hex
*copyc gfp$mtr_get_fde_p
*copyc gfp$mtr_get_locked_fde_p
*copyc jmp$check_scheduler_memory_wait
*copyc jmf$ijle_p
*copyc jmp$ijl_block_valid
*copyc jmp$lock_ajl
*copyc jmp$set_scheduler_event
*copyc jmp$unlock_ajl
*copyc jsp$adv_expired_swapped_jobs
*copyc jsp$initiate_swapout_io
*copyc jsp$io_complete
*copyc jsp$recalculate_swapped_pages
*copyc mmp$age_job_working_set
*copyc mmp$asid
*copyc mmp$aste_pointer
*copyc mmp$aste_pointer_from_pfti
*copyc mmp$asti
*copyc mmp$change_asid
*copyc mmp$check_queues
*copyc mmp$convert_pva
*copyc mmp$delete_last_pfti_from_array
*copyc mmp$delete_pt_entry
*copyc mmp$determine_shared_queue_id
*copyc mmp$fetch_pfti_array_size
*copyc mmp$find_next_pfti
*copyc mmp$free_asid
*copyc mmp$free_memory_in_job_queues
*copyc mmp$get_inhibit_io_status
*copyc mmp$get_max_sdt_pointer
*copyc mmp$get_max_sdt_sdtx_pointer
*copyc mmp$get_sdt_entry_p
*copyc mmp$get_sdtx_entry_p
*copyc mmp$get_verify_asti_in_fde
*copyc mmp$initialize_find_next_pfti
*copyc mmp$link_page_frame_to_queue
*copyc mmp$maintain_memory_thresholds
*copyc mmp$make_pt_entry
*copyc mmp$mtr_set_get_segment_length
*copyc mmp$process_page_table_full
*copyc mmp$process_volume_unavailable
*copyc mmp$purge_all_cache
*copyc mmp$purge_all_cache_map
*copyc mmp$purge_all_page_map
*copyc mmp$reclaim_ast_entries
*copyc mmp$relink_page_frame
*copyc mmp$remove_page_from_job
*copyc mmp$remove_page_from_jws
*copyc mmp$remove_pages_from_jws
*copyc mmp$remove_stale_pages
*copyc mmp$reset_find_next_pfti
*copyc mmp$reset_store_next_pfti
*copyc mmp$store_next_pfti
*copyc mmp$sva_purge_all_page_map
*copyc mmp$sva_purge_one_page_map
*copyc mmp$verify_pva
*copyc mmp$write_page_to_disk
*copyc mmp$xcheck_queues
*copyc mmp$xtask_pva_to_sva
*copyc mtf$cst_p
*copyc mtp$error_stop
*copyc mtp$set_status_abnormal
*copyc osp$process_keypoint_io_error
*copyc osp$process_keypoint_periodic
*copyc tmp$cause_task_switch
*copyc tmp$check_for_swapout_candidate
*copyc tmp$check_timed_wait_not_queued
*copyc tmp$clear_lock
*copyc tmp$dequeue_high_priority_task
*copyc tmp$dequeue_task
*copyc tmp$find_next_queued_task
*copyc tmp$get_taskid_from_task_queue
*copyc tmp$get_xcb_p
*copyc tmp$idle_non_dispatchable_job
*copyc tmp$obtain_ijl_ordinal_from_ptl
*copyc tmp$queue_task
*copyc tmp$reissue_monitor_request
*copyc tmp$send_monitor_fault
*copyc tmp$set_lock
*copyc tmp$set_monitor_flag
*copyc tmp$set_task_ready
*copyc dfv$file_server_debug_enabled
*copyc gfv$null_sfid
*copyc jmv$ajl_p
*copyc jmv$idle_dispatching_controls
*copyc jmv$ijl_p
*copyc jmv$job_scheduler_table
*copyc jmv$max_ajl_ordinal_in_use
*copyc jmv$max_class_working_set
*copyc jmv$null_ijl_ordinal
*copyc jmv$scan_idle_dispatch_interval
*copyc jmv$system_ijl_ordinal
*copyc jsv$pages_needed_for_sfd
*copyc jsv$swapped_page_entry_size
*copyc mmv$aggressive_aging_level
*copyc mmv$aging_statistics
*copyc mmv$ast_p
*copyc mmv$async_work
*copyc mmv$avail_mod_q_decr_per_task
*copyc mmv$avail_mod_q_floor_min
*copyc mmv$avail_mod_wait_queue
*copyc mmv$avail_mod_waitq_dequeue_int
*copyc mmv$avail_modified_queue_max
*copyc mmv$gpql
*copyc mmv$last_active_shared_queue
*copyc mmv$max_template_segment_number
*copyc mmv$max_working_set_size
*copyc mmv$mem_wait_q_dequeue_interval
*copyc mmv$memory_wait_queue
*copyc mmv$multi_page_write
*copyc mmv$multiple_caches
*copyc mmv$multiple_page_maps
*copyc mmv$no_memory_buffering
*copyc mmv$pft_p
*copyc mmv$pfti_array_p
*copyc mmv$pt_length
*copyc mmv$pt_p
*copyc mmv$reassignable_page_frames
*copyc mmv$resident_job_target
*copyc mmv$shared_pages_in_jws
*copyc mmv$tables_initialized
*copyc mmv$time_to_call_mem_mgr
*copyc mmv$time_to_call_quick_sweep
*copyc mmv$total_contig_pages_assigned
*copyc mmv$write_aged_out_pages
*copyc mtv$monitor_segment_table
*copyc mtv$nos_segment_table_p
*copyc osv$180_memory_limits
*copyc osv$cpus_physically_configured
*copyc osv$keypoint_control
*copyc osv$mainframe_wired_cb_heap
*copyc osv$mainframe_wired_heap
*copyc osv$page_size
*copyc osv$time_to_check_asyn
*copyc tmv$cpu_execution_statistics
*copyc tmv$dispatching_control_sets
*copyc tmv$dispatching_controls
*copyc tmv$long_wait_force_swap_time
*copyc tmv$null_global_task_id
*copyc tmv$ptl_lock
*copyc tmv$ptl_p
*copyc tmv$timed_wait_not_queued

*if $true(mmc$debug)
*copyc mmv$test_reassign_asid
*ifend
?? TITLE := '  Global Declarations Declared by this module', EJECT ??

  TYPE
    { Variant record used to view one stored value in several guises:
    { as a pointer to the segment descriptor table, as a process
    { virtual address, as a raw cell pointer, or as a pointer to the
    { extended segment descriptor table.  The tag b selects the view.
    ptr_type = record
      case b: 0 .. 3 of
      = 0 =
        st_p: ^mmt$segment_descriptor_table,
      = 1 =
        pva: ost$pva,
      = 2 =
        p: ^cell,
      = 3 =
        sdtx_p: ^mmt$segment_descriptor_table_ex,
      casend,
    recend;


  VAR
    { Memory manager module-wide state.  XDCL names are exported to
    { other modules; most counters/intervals are initialized from
    { mmc$ tuning constants.  NOTE(review): #GATE presumably makes a
    { cell reachable through the monitor gate - confirm.
    mmv$ring1_request_trace: [XDCL, #GATE] array [0 .. 20] of integer,
    mmv$free_file_server_pages: [XDCL] boolean := FALSE,
    mmv$rma_unlock_scrap_count: [XDCL, #GATE, oss$mainframe_wired] integer := 0,
    mmv$io_error_q_age_interval: [XDCL, #GATE] integer := mmc$mmu_io_error_q_age_interval,
    mmv$jws_queue_age_interval: [XDCL, #GATE] integer := mmc$mmu_jws_age_interval,
    mmv$reduce_jws_for_thrashing: [XDCL] boolean := FALSE,
    mmv$shared_queue_age_interval: [XDCL, #GATE] integer := mmc$mmu_shared_age_interval,
    mmv$page_queue_age_cycle: mmt$page_queue_age_cycle := mmc$page_queue_age_cycle_min,
    mmv$quick_sweep_interval: [XDCL, #GATE] integer := 7fffffffffff(16), {turn it off until we know more ..
    mmv$periodic_call_interval: [XDCL, #GATE] integer := mmc$mmu_periodic_call_interval,
    mmv$searched_entire_pft: integer := 0,
    mmv$total_page_frames: [XDCL, #GATE] mmt$page_frame_index := 1500, {deadstart init resets exactly}
    mmv$trap_m: [XDCL] 0 .. 255 := 0,
    mmv$image_file: [XDCL, #GATE] mmt$image_file := [FALSE, * , * ],
    mmv$aging_algorithm: [XDCL, #GATE] integer := mmc$mmu_aging_algorithm,
    mmv$sq_mcount: [XDCL, #GATE, oss$mainframe_wired] integer := 0,
    mmv$sq_rcount: [XDCL, #GATE, oss$mainframe_wired] integer := 0,
    mmv$successful_error_retry: [XDCL, #GATE] integer := 0,
    syv$user_templates: [XDCL, #GATE] boolean := FALSE;

*if $true(mmc$debug)

  VAR
    { Test hook; only compiled when mmc$debug is true.
    mmv$test_pt_full: [XDCL] integer := 0;

*ifend

  VAR
    { Dynamic aging controls: per-queue statistics plus the floors,
    { minima/maxima and increment bounds that govern the available
    { queue target.  Limits are initialized from mmc$mmu_ tuning
    { constants; dynamic aging starts disabled.
    mmv$dynamic_aging_statistics: [XDCL, #GATE] mmt$dynamic_aging_statistics :=
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, [REP mmc$pq_shared_last_site - mmc$pq_shared_first + 1 of
          [0, 0, 0, 0, 0, 0, 0, 0, 0]]],
    mmv$dynamic_available_floor: [XDCL, #GATE, oss$mainframe_wired] integer :=
          mmc$mmu_dynamic_available_floor,
    mmv$dynamic_avail_emergency_min: [XDCL, #GATE, oss$mainframe_wired] integer :=
          mmc$mmu_dynamic_avail_emerg_min,
    mmv$dynamic_available_max: [XDCL, #GATE, oss$mainframe_wired] integer := mmc$mmu_dynamic_available_max,
    mmv$dynamic_available_min: [XDCL, #GATE, oss$mainframe_wired] integer := mmc$mmu_dynamic_available_min,
    mmv$dynamic_aging_enabled: [XDCL, #GATE, oss$mainframe_wired] boolean := FALSE,
    mmv$dynamic_aging_in_use: [XDCL, oss$mainframe_wired] boolean := FALSE,
    mmv$dynamic_neg_increment_max: [XDCL, #GATE, oss$mainframe_wired] integer := mmc$mmu_dynamic_neg_inc_max,
    mmv$dynamic_pos_increment_max: [XDCL, #GATE, oss$mainframe_wired] integer := mmc$mmu_dynamic_pos_inc_max;

  VAR
    { Times (in #FREE_RUNNING_CLOCK units) at which the memory wait
    { queue and the avail-mod wait queue were last scanned by
    { p$dequeue_tasks_on_mem_limits.  ("scaned" (sic) is kept to
    { match the existing references.)  NOTE(review): the 5000000
    { initial value presumably delays the first periodic scan after
    { deadstart - confirm.
    v$time_mem_wait_q_scaned: [STATIC] integer := 5000000,
    v$time_avail_mod_wait_q_scaned: [STATIC] integer := 5000000;

?? TITLE := 'F$SYSTEM_JOBS_WORKING_SET', EJECT ??

  FUNCTION [INLINE, UNSAFE] f$system_jobs_working_set: mmt$page_frame_index;

    { Returns the number of page frames currently charged to the
    { system job: the sum of the counts of its fixed, io-error, and
    { working-set page queues.

    VAR
      sys_ijle_p: ^jmt$initiated_job_list_entry;

    sys_ijle_p := jmf$ijle_p (jmv$system_ijl_ordinal);
    f$system_jobs_working_set :=
          sys_ijle_p^.job_page_queue_list [mmc$pq_job_fixed].count +
          sys_ijle_p^.job_page_queue_list [mmc$pq_job_io_error].count +
          sys_ijle_p^.job_page_queue_list [mmc$pq_job_working_set].count;

  FUNCEND f$system_jobs_working_set;
?? OLDTITLE ??
?? NEWTITLE := 'P$DEQUEUE_TASKS_ON_MEM_LIMITS', EJECT ??

  PROCEDURE p$dequeue_tasks_on_mem_limits
    (    additional_reassignable_pages: integer);

  { Ready tasks that were queued waiting on memory limits, now that
  { additional_reassignable_pages page frames have become available.
  { One task is dequeued per page made available; each wait queue is
  { also scanned on a periodic interval so no task waits forever.

    VAR
      count: integer,
      taskid: ost$global_task_id;

{Dequeue Tasks queued due to Low Memory
{  We dequeue 1 task for each page that was made available

    count := additional_reassignable_pages;
    WHILE (count > 0) AND (mmv$memory_wait_queue.head <> 0) AND (mmv$reassignable_page_frames.now > 0) DO
      tmp$dequeue_high_priority_task (mmv$memory_wait_queue, taskid);
      count := count - 1;
    WHILEND;

    IF ((#FREE_RUNNING_CLOCK (0) - v$time_mem_wait_q_scaned) > mmv$mem_wait_q_dequeue_interval) THEN

{Periodically dequeue the first task in the Q
      IF (mmv$memory_wait_queue.head <> 0) AND (mmv$reassignable_page_frames.now > 0) THEN
        tmp$dequeue_task (mmv$memory_wait_queue, taskid);
      IFEND;

      v$time_mem_wait_q_scaned := #FREE_RUNNING_CLOCK (0);
    IFEND;

{Dequeue Tasks queued due to Avail Mod overrun.

    IF mmv$avail_mod_wait_queue.head <> 0 THEN
      IF mmv$gpql [mmc$pq_avail_modified].pqle.count < mmv$avail_mod_q_floor_min THEN
        count := 0ffff(16); {Avail Mod dropped below the floor min, so ready all tasks

      ELSEIF count > 0 THEN
        { Ready one task per avail_mod_q_decr_per_task pages of slack
        { below the avail-modified queue maximum.
        count := (mmv$avail_modified_queue_max - mmv$gpql [mmc$pq_avail_modified].pqle.count) DIV
              mmv$avail_mod_q_decr_per_task;

      ELSEIF ((#FREE_RUNNING_CLOCK (0) - v$time_avail_mod_wait_q_scaned) >
            mmv$avail_mod_waitq_dequeue_int) THEN

{Periodically dequeue the first task in the Q
        tmp$dequeue_task (mmv$avail_mod_wait_queue, taskid);
        v$time_avail_mod_wait_q_scaned := #FREE_RUNNING_CLOCK (0);
      IFEND;

      WHILE (count > 0) AND (mmv$avail_mod_wait_queue.head <> 0) AND (mmv$reassignable_page_frames.now > 0) DO
        tmp$dequeue_high_priority_task (mmv$avail_mod_wait_queue, taskid);
        count := count - 1;
      WHILEND;
    IFEND;

  PROCEND p$dequeue_tasks_on_mem_limits;
?? OLDTITLE ??
?? NEWTITLE := 'MMP$UNLOCK_RMA_LIST - Unlock pages defined by rma list', EJECT ??
*copyc mmh$unlock_rma_list

  { Unlock the pages described by an rma list after an IO completes.
  {
  {   iotype        - the IO function that finished.
  {   list_p,
  {   list_length   - real memory address list naming the page frames.
  {   io_identifier - supplies the ijl ordinal for swap IO.
  {   mf_job_file   - TRUE when the IO was against a local job file
  {                   (inhibit swap is then not decremented on writes).
  {   io_error      - completion status; reset to ioc$no_error once an
  {                   initial-write failure has been fully processed.
  {   status        - always set normal; failures are reported through
  {                   the operator display and scrap counters instead.

  PROCEDURE [XDCL] mmp$unlock_rma_list
    (    iotype: iot$io_function;
         list_p: ^mmt$rma_list;
         list_length: mmt$rma_list_length;
         io_identifier: mmt$io_identifier;
         mf_job_file: boolean;
     VAR io_error: iot$io_error;
     VAR status: syt$monitor_status);

    VAR
      decrement_inhibit_swap: 0 .. mmc$max_rma_list_length,
      failed_job_name: jmt$system_supplied_name,
      ijle_p: ^jmt$initiated_job_list_entry,
      initial_reassignable_now: integer,
      list_i: mmt$rma_list_index,
      pfte_p: ^mmt$page_frame_table_entry,
      pte_p: ^ost$page_table_entry,
      pfti: mmt$page_frame_index,
      port: integer,
      taskid: ost$global_task_id,
      init_io_error: iot$io_error;

?? NEWTITLE := 'PROCESS_WRITE_FAILURE', EJECT ??

    PROCEDURE process_write_failure;

    { A write failed (init_io_error <> ioc$no_error): make the page
    { valid and modified again, record the error in the pfte, and
    { relink the frame to the io-error queue matching its owner
    { (shared, job, or swapped).  Uses pfti, pfte_p, pte_p, io_error
    { and init_io_error from the enclosing scope.

      VAR
        ijle_p: ^jmt$initiated_job_list_entry;

      pte_p^.v := TRUE;
      pfte_p^.io_error := init_io_error;
      IF pfte_p^.aste_p^.queue_id = mmc$pq_job_working_set THEN
        IF pfte_p^.aste_p^.sfid.residence = gfc$tr_system THEN
          {Note link to shared error q - required by job exit to
          {leave pages in memory correctly.
          mmp$relink_page_frame (pfti, mmc$pq_shared_io_error);
        ELSE
          ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);

{  If this page is already in the swapped error queue then it has already been
{  put there as part of a transfer unit on an initial write.  If the job is
{  swapped pass the wait io complete state (JW) and the page is part of the job
{  working set leave it there and just set the modified bit.  We are OK unless
{  memory gets freed.  But since there is a page in the avail modified queue
{  that belongs to the same transfer unit in this write request that will be
{  put into the swapped error queue, when we swap in and reclaim it we can
{  reset the modified bit for all pages in this transfer unit.

          IF (pfte_p^.queue_id <> mmc$pq_swapped_io_error) THEN
            IF ijle_p^.swap_status <= jmc$iss_wait_job_io_complete THEN
              mmp$relink_page_frame (pfti, mmc$pq_job_io_error);
            ELSEIF (pfte_p^.queue_id = mmc$pq_avail_modified) THEN
              mmp$relink_page_frame (pfti, mmc$pq_swapped_io_error);
              ijle_p^.delayed_swapin_work := ijle_p^.delayed_swapin_work +
                    $jmt$delayed_swapin_work [jmc$dsw_io_error_while_swapped];
            IFEND;
          IFEND;

{  If this is an initial write that failed we must remove any other pages in the
{  transfer unit that might have been queued for IO after this request was made.

          IF (io_error = ioc$error_on_init) OR (io_error = ioc$unit_down_on_init) THEN
            remove_pages_in_tu (ijle_p, pfte_p);
          IFEND;
          io_error := ioc$no_error;

        IFEND;
      ELSE
        mmp$relink_page_frame (pfti, mmc$pq_shared_io_error);
      IFEND;
      pte_p^.m := TRUE; {Must be after RELINK}

    PROCEND process_write_failure;
?? OLDTITLE ??
?? NEWTITLE := 'PROCESS_REWRITE_SUCCESS', EJECT ??

    PROCEDURE process_rewrite_success;

    { A page previously marked in error was finally written
    { correctly: clear the recorded error, ready any tasks waiting
    { on the page, and move the frame to the available (or free)
    { queue when that is safe.  Uses pfti, pfte_p, pte_p and taskid
    { from the enclosing scope.

      VAR
        ijle_p: ^jmt$initiated_job_list_entry;

      IF pfte_p^.active_io_count = 0 THEN
        WHILE pfte_p^.task_queue.head <> 0 DO
          tmp$dequeue_task (pfte_p^.task_queue, taskid);
        WHILEND;
        {Page that was in error queue has been written correctly !!
        pfte_p^.io_error := ioc$no_error;
        mmv$successful_error_retry := mmv$successful_error_retry + 1;

{ If page is in available modified, move it to the correct queue.  If the page
{ is part of the working set and the job is not swapped pass the wait_io_complete
{ state, move it to the correct queue.  Else leave it where it is.  Otherwise
{ mmv$reassignable_page_frames.soon will not be decremented correctly.

        IF NOT mmv$pt_p^ [pfte_p^.pti].m THEN
          ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);
          IF (ijle_p^.swap_status <= jmc$iss_wait_job_io_complete) OR
                (pfte_p^.queue_id = mmc$pq_avail_modified) THEN
            pte_p^.v := FALSE;
            mmp$sva_purge_one_page_map (pfte_p^.sva); {Essential for dual CPU}
            IF mmv$no_memory_buffering THEN
              mmp$delete_pt_entry (pfti, TRUE);
              mmp$relink_page_frame (pfti, mmc$pq_free);
            ELSE
              mmp$relink_page_frame (pfti, mmc$pq_avail);
            IFEND;
          IFEND;
        IFEND;
      IFEND;

    PROCEND process_rewrite_success;
?? OLDTITLE ??
?? NEWTITLE := 'REMOVE_PAGES_IN_TU', EJECT ??

    PROCEDURE remove_pages_in_tu
      (    ijle_p: ^jmt$initiated_job_list_entry;
           init_pfte_p: ^mmt$page_frame_table_entry);

    { An initial write failed: find every other page of the same
    { transfer unit that is still queued for IO and either free it
    { (page-in locked pages) or relink it to the proper io-error
    { queue.  The transfer unit is the segment byte range
    { [boffset, eoffset) surrounding the failing page init_pfte_p.

      VAR
        ajlo: jmt$ajl_ordinal,
        boffset: integer,
        eoffset: integer,
        fde_p: gft$file_desc_entry_p,
        pfte_p: ^mmt$page_frame_table_entry,
        pfti: mmt$page_frame_index,
        pte_p: ^ost$page_table_entry;

      IF ijle_p^.swap_status >= jmc$iss_free_swapped_memory THEN

        { Job is swapped and memory freed, so the maximum transfer
        { size was used; clamp the range to the segment bounds.

        IF (init_pfte_p^.sva.offset - dmc$max_transfer_size) < 0 THEN
          boffset := 0;
        ELSE
          boffset := init_pfte_p^.sva.offset - dmc$max_transfer_size;
        IFEND;

        { FIXED: the upper clamp must test offset PLUS the maximum
        { transfer size (mirroring the lower clamp above); the old
        { code subtracted it, so the test was nearly always true and
        { eoffset could exceed osc$max_segment_length.

        IF ((init_pfte_p^.sva.offset + dmc$max_transfer_size) <= osc$max_segment_length) THEN
          eoffset := init_pfte_p^.sva.offset + dmc$max_transfer_size;
        ELSE
          eoffset := osc$max_segment_length;
        IFEND;
      ELSE

        { Job not swapped: the transfer unit is the allocation unit
        { containing the failing offset.  The AJL must be locked to
        { fetch the file descriptor entry.

        jmp$lock_ajl (ijle_p, init_pfte_p^.aste_p^.ijl_ordinal, ajlo);

        gfp$mtr_get_fde_p (init_pfte_p^.aste_p^.sfid, ijle_p, fde_p);
        boffset := init_pfte_p^.sva.offset DIV fde_p^.allocation_unit_size * fde_p^.allocation_unit_size;
        eoffset := boffset + fde_p^.allocation_unit_size;

        jmp$unlock_ajl (ijle_p);
      IFEND;

      { Walk the segment's page frame chain looking for pages in the
      { transfer unit that still have IO outstanding.

      pfti := init_pfte_p^.aste_p^.pft_link.fwd;

      WHILE pfti <> 0 DO
        pfte_p := ^mmv$pft_p^ [pfti];
        IF (pfte_p^.sva.offset >= boffset) AND (pfte_p^.sva.offset < eoffset) AND
              (pfte_p^.active_io_count <> 0) THEN
          IF pfte_p^.locked_page = mmc$lp_page_in_lock THEN
            mmp$delete_pt_entry (pfti, TRUE);
            mmp$relink_page_frame (pfti, mmc$pq_free);
          ELSE

{  Assume write - other iotypes are not used with local files.
{  If this page is already in the swapped error queue then it was put there as part
{  of a transfer unit on another initial write.  The job is swapped and memory has
{  been freed so the maximum transfer size was used.
{  If the job is swapped pass the wait job io complete state and the page is part of
{  the job working set leave the page where it is and just set the modified bit.
{  If memory gets freed, all pages in the transfer unit will get their modified bit
{  reset when we swap in and reclaim io error pages.

            IF (pfte_p^.queue_id <> mmc$pq_swapped_io_error) THEN
              pte_p := ^mmv$pt_p^ [pfte_p^.pti];
              pte_p^.v := TRUE;
              IF ijle_p^.swap_status < jmc$iss_job_io_complete THEN
                mmp$relink_page_frame (pfti, mmc$pq_job_io_error);
              ELSEIF (pfte_p^.queue_id = mmc$pq_avail_modified) THEN
                mmp$relink_page_frame (pfti, mmc$pq_swapped_io_error);
              IFEND;
              pte_p^.m := TRUE;
            IFEND;
          IFEND;
        IFEND;
        pfti := mmv$pft_p^ [pfti].segment_link.fwd;
      WHILEND;

    PROCEND remove_pages_in_tu;
?? OLDTITLE ??
?? NEWTITLE := 'REPORT_RMA_UNLOCK_ERROR', EJECT ??

    PROCEDURE report_rma_unlock_error;

    { Report, via the operator display, that an rma list entry named
    { a page frame with no IO outstanding, and bump the scrap count.
    { Uses pfti from the enclosing scope.

      VAR
        msg: string (65);

{ We can only report the error here and continue. We don't know who the pfte entry belongs to
{ without adding further code to the response packet. Maybe we will do that in the future if
{ this error occurs very often.

      mmv$rma_unlock_scrap_count := mmv$rma_unlock_scrap_count + 1;
      msg := 'Unlock_rma_list failure on page frame table index ';
      dpp$convert_int_to_str_hex (4, pfti, msg (51, * ));
      dpp$display_error (msg);

    PROCEND report_rma_unlock_error;
?? OLDTITLE ??
?? EJECT ??

    { Mainline: release the IO lock on every page named in the rma
    { list, process completion per iotype, then update the job's IO
    { counts and ready any tasks waiting on memory limits.

    status.normal := TRUE;
    port := 0;

    init_io_error := io_error;
    initial_reassignable_now := mmv$reassignable_page_frames.now;

    decrement_inhibit_swap := list_length;
    IF (iotype = ioc$swap_out) OR (iotype = ioc$swap_in) THEN
      ijle_p := jmf$ijle_p (io_identifier.ijl_ordinal);
      IF io_error <> ioc$no_error THEN
        ijle_p^.swap_data.swapping_io_error := io_error;
      IFEND;

    ELSE { Not swap io }

    /unlock_pages/
      FOR list_i := 1 TO list_length DO
        IF list_p^ [list_i].length = 0 THEN
          EXIT /unlock_pages/; {----->
        IFEND;
        pfti := list_p^ [list_i].rma DIV osv$page_size;

        pfte_p := ^mmv$pft_p^ [pfti];
        IF pfte_p^.active_io_count = 0 THEN
{         mtp$error_stop ('mm - unlock rma list error');
          report_rma_unlock_error;

{ Just exit without doing anything else.

          RETURN; {----->
        IFEND;

        pfte_p^.active_io_count := pfte_p^.active_io_count - 1;

        IF pfte_p^.queue_id = mmc$pq_free THEN
          IF pfte_p^.active_io_count = 0 THEN
            IF (io_error <> ioc$no_error) AND (iotype = ioc$keypoint_io) THEN
              osp$process_keypoint_io_error;
            IFEND;
            mmp$link_page_frame_to_queue (pfti, pfte_p);
          IFEND;
        ELSE
          pte_p := ^mmv$pt_p^ [pfte_p^.pti];

          IF (iotype = ioc$write_locked_page) AND (pfte_p^.queue_id < mmc$pq_first_valid_in_pt) THEN
            mtp$error_stop ('MM - unlock rmal, bad queue');
          IFEND;

          CASE iotype OF
          = ioc$no_io =
            ;
          = ioc$explicit_write, ioc$write_mass_storage, ioc$initialize_sectors, ioc$write_to_client =
            ;
          = ioc$explicit_read, ioc$read_uft, ioc$read_mass_storage, ioc$explicit_read_no_purge =
            IF (list_i = 1) AND (iotype <> ioc$explicit_read_no_purge) THEN
              mmp$purge_all_cache;
            IFEND;
            IF (pfte_p^.queue_id = mmc$pq_avail_modified) AND NOT pte_p^.m THEN
              mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon - 1;
            IFEND;
            pte_p^.m := TRUE;

          = ioc$read_from_client =
{!          We will decide whether or not to do this when we support active readers on the server with
{!          writers on (multiple) clients.
{!          IF list_i = 1 THEN
{!            mmp$purge_all_cache;
{!          IFEND;
            pfte_p^.locked_page := mmc$lp_not_locked;
            IF io_error = ioc$no_error THEN
              IF pfte_p^.queue_id >= mmc$pq_first_valid_in_pt THEN
                pte_p^.v := TRUE;
              ELSEIF NOT pte_p^.m THEN
                mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon - 1;
              IFEND;
              pte_p^.u := TRUE;
              pte_p^.m := TRUE;
            ELSE
              mmp$unlock_rma_list_error (pfti, pfte_p, io_error);
            IFEND;

          = ioc$read_page, ioc$read_for_server, ioc$read_ahead_on_server =
            ?IF mmc$debug_rma_list THEN
              IF pte_p^.v THEN
                mtp$error_stop ('MM - unlock rmal, read to valid page');
              IFEND;
            ?IFEND;
            pfte_p^.locked_page := mmc$lp_not_locked;
            IF io_error = ioc$no_error THEN
              IF pfte_p^.queue_id >= mmc$pq_first_valid_in_pt THEN
                pte_p^.v := TRUE;
                pte_p^.u := TRUE;
              IFEND;
            ELSE
              mmp$unlock_rma_list_error (pfti, pfte_p, io_error);
            IFEND;

          = ioc$allocate =
            pfte_p^.locked_page := mmc$lp_not_locked;
            IF io_error = ioc$no_error THEN
              IF pfte_p^.queue_id >= mmc$pq_first_valid_in_pt THEN
                pte_p^.v := TRUE;
              IFEND;
            ELSE
              mmp$unlock_rma_list_error (pfti, pfte_p, io_error);
            IFEND;

          = ioc$write_page, ioc$write_locked_page, ioc$write_for_server =
            IF iotype = ioc$write_locked_page THEN
              IF pfte_p^.queue_id >= mmc$pq_first_valid_in_pt THEN
                pte_p^.v := TRUE;
              IFEND;
              pfte_p^.locked_page := mmc$lp_not_locked;
            IFEND;
            IF init_io_error <> ioc$no_error THEN
              process_write_failure;
            ELSEIF pfte_p^.io_error <> ioc$no_error THEN
              process_rewrite_success;
            IFEND;

          ELSE
            mtp$error_stop ('MM - bad IO type unlock_rma_list');
          CASEND;
        IFEND;

{ If the page is no longer locked (all IO complete), there is some more processing to be done:
{ Dequeue all tasks waiting for the IO to complete.

        IF pfte_p^.active_io_count = 0 THEN
          WHILE pfte_p^.task_queue.head <> 0 DO
            tmp$dequeue_task (pfte_p^.task_queue, taskid);
          WHILEND;

{  If the page is in the available modified queue and has been successfully written to disk,
{  the page should be moved to the available queue. (Debugging option allows for available
{  queue to be disabled).

          IF (pfte_p^.queue_id = mmc$pq_avail_modified) THEN
            IF NOT mmv$pt_p^ [pfte_p^.pti].m THEN
              IF mmv$no_memory_buffering THEN
                mmp$delete_pt_entry (pfti, TRUE);
                mmp$relink_page_frame (pfti, mmc$pq_free);
              ELSE
                mmp$relink_page_frame (pfti, mmc$pq_avail);
              IFEND;
            IFEND;

            {  If the page was being used by file server on the SERVER and has been successfully
            {  written to disk/client, remove the page from the shared queue if possible. NOTE that
            { the page cannot be removed if it is modified. Also note that since the page is VALID
            { the MODIFIED BIT in the page table cannot be examined until the VALID BIT
            { is cleared and the page maps purged.

          ELSEIF (pfte_p^.queue_id >= mmc$pq_shared_first) AND
                (pfte_p^.queue_id <= mmc$pq_shared_last) AND ((iotype = ioc$write_to_client) OR
                (iotype = ioc$write_for_server)) THEN
            pte_p^.v := FALSE;
            mmp$sva_purge_one_page_map (pfte_p^.sva); {Essential for dual CPU}
            IF pte_p^.m THEN
              pte_p^.v := TRUE;
            ELSEIF mmv$free_file_server_pages THEN
              mmp$delete_pt_entry (pfti, TRUE);
              mmp$relink_page_frame (pfti, mmc$pq_free);
            ELSE
              mmp$relink_page_frame (pfti, mmc$pq_avail);
            IFEND;
          IFEND;
        IFEND;

      FOREND /unlock_pages/;

      { NOTE(review): pfte_p here is whatever the last processed list
      { entry left it as; this assumes a non-swap call always has
      { list_length >= 1 with a nonzero first entry - confirm that
      { callers guarantee this.
      ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);

{  If the IJL entry has been freed get the heck out of here before we crash.
      IF ijle_p^.entry_status = jmc$ies_entry_free THEN
        RETURN; {----->
      IFEND;

{  If IO was a "write" of a local file, don't decrement inhibit swap.
{  If an IO error occurred on a swapped job, set the task ready so it can swap in and reclaim
{  its error pages.

      IF (mf_job_file) AND ((iotype = ioc$write_page) OR (iotype = ioc$write_locked_page)) THEN
        decrement_inhibit_swap := 0;
        IF (init_io_error <> ioc$no_error) AND (ijle_p^.entry_status <> jmc$ies_entry_free) AND
              (ijle_p^.swap_status >= jmc$iss_job_io_complete) THEN
          tmp$set_task_ready (ijle_p^.job_monitor_taskid, 0 {readying_task_priority} ,
                tmc$rc_ready_conditional_wi);
        IFEND;
      IFEND;


    IFEND; { Not swap io }

{ The active_io_page_count must always be accurate but the number of active_io_requests can sometimes
{ be incorrect.  If pages are moved from a JWS to the shared working set, the active_io_page_count is
{ modified to reflect the move but the active_io_requests count can not be modified because it is not
{ known how many IO requests were made.  Therefore, the code here must ensure active_io_requests does
{ not become negative and if the active_io_active count becomes zero then zero active_io_requests.  If
{ active_io_count is non-zero then we know there must be at least one IO request outstanding therefore
{ do not decrement active_io_requests if it is not greater than one.
{ This will correct the active_io_requests count if it was incorrect.  The only use of the requests
{ count is to slowdown the task if it initiates too many IO requests while at MAXWS.

    IF ijle_p^.active_io_page_count < list_length THEN
{     mtp$error_stop ('MM-NEGATIVE IO COUNT IN UNLOCK RMA');
      mmv$rma_unlock_scrap_count := mmv$rma_unlock_scrap_count + 1;
      ijle_p^.lost_io_count := list_length;
      failed_job_name := ijle_p^.system_supplied_name;
      dpp$display_error ('Failure in unlock_rma_list. The following job is suspect');
      dpp$display_error (failed_job_name);
      RETURN; {----->
{ Exit here without dequeuing any tasks as they could be either screwed up or not even involved.
{ If enough of these errors occur then we will look at further code.

    IFEND;

    ijle_p^.inhibit_swap_count := ijle_p^.inhibit_swap_count - decrement_inhibit_swap;
    ijle_p^.active_io_page_count := ijle_p^.active_io_page_count - list_length;
    IF ijle_p^.active_io_page_count = 0 THEN
      ijle_p^.active_io_requests := 0;
    ELSEIF ijle_p^.active_io_requests > 1 THEN
      ijle_p^.active_io_requests := ijle_p^.active_io_requests - 1;
    IFEND;
    IF (ijle_p^.inhibit_swap_count = 0) AND (ijle_p^.notify_swapper_when_io_complete) THEN
      jsp$io_complete (ijle_p);
    IFEND;

    { Ready tasks waiting on memory limits, crediting the pages this
    { call made reassignable.

    IF (mmv$memory_wait_queue.head <> 0) OR (mmv$avail_mod_wait_queue.head <> 0) THEN
      p$dequeue_tasks_on_mem_limits (mmv$reassignable_page_frames.now - initial_reassignable_now);
    IFEND;

    mmp$check_queues;

  PROCEND mmp$unlock_rma_list;
?? OLDTITLE ??
?? NEWTITLE := 'MMP$UNLOCK_RMA_LIST_ERROR', EJECT ??

{This procedure is called to process an IO error on a page-in request.

  PROCEDURE [XDCL] mmp$unlock_rma_list_error
    (    pfti: mmt$page_frame_index;
         pfte_p: ^mmt$page_frame_table_entry;
         io_error: iot$io_error);

  { Notify every task queued on the failing page frame of the read
  { error - via a monitor fault, scheduler flag, or volume
  { unavailable processing depending on io_error - then normally
  { delete the page table entry and free the frame.  The entry is
  { kept (v forced TRUE) when a server allocation error hits a task
  { that cannot take the fault yet (traps disabled or running in
  { ring 1).

    VAR
      delete_pt_entry_ok: boolean,
      ijle_p: ^jmt$initiated_job_list_entry,
      monitor_fault: ost$monitor_fault,
      sac_p: ^mmt$segment_access_condition,
      sdte_segment_number: ost$segment,
      sdtxe_p: ^mmt$segment_descriptor_extended,
      status: syt$monitor_status,
      taskid: ost$global_task_id,
      xcb_p: ^ost$execution_control_block;

    monitor_fault.identifier := mmc$segment_fault_processor_id;
    sac_p := #LOC (monitor_fault.contents);
    sac_p^.identifier := mmc$sac_io_read_error;
    delete_pt_entry_ok := TRUE;

    tmp$get_taskid_from_task_queue (pfte_p^.task_queue, taskid);
    WHILE taskid <> tmv$null_global_task_id DO
      tmp$get_xcb_p (taskid, xcb_p, ijle_p);
      IF xcb_p <> NIL THEN
        IF io_error = ioc$unrecovered_error_unit_down THEN
          mmp$process_volume_unavailable (xcb_p, FALSE);
        ELSEIF io_error = ioc$server_allocation_error THEN
          sdte_segment_number := #SEGMENT (xcb_p^.page_wait_info.pva);
          sdtxe_p := mmp$get_sdtx_entry_p (xcb_p, sdte_segment_number);
          sdtxe_p^.assign_active := 0;
          tmp$set_monitor_flag (taskid, mmc$mf_segment_mgr_flag, status);
          IF (xcb_p^.xp.trap_enable <> osc$traps_enabled) OR (xcb_p^.xp.p_register.pva.ring = 1) THEN
            delete_pt_entry_ok := FALSE;
            mmv$pt_p^ [pfte_p^.pti].v := TRUE;
          IFEND;
        ELSEIF io_error = ioc$server_has_terminated THEN
          sac_p^.identifier := mmc$sac_file_server_terminated;
          IF xcb_p^.page_wait_info.pva <> NIL THEN
            sac_p^.segment := xcb_p^.page_wait_info.pva;
            tmp$send_monitor_fault (taskid, #LOC (monitor_fault), TRUE);
          IFEND;
        ELSE {io_error <> ioc$unrecovered_error_unit_down, io_error <> ioc$server_allocation_error}
          IF xcb_p^.page_wait_info.pva <> NIL THEN
            sac_p^.segment := xcb_p^.page_wait_info.pva;
            tmp$send_monitor_fault (taskid, #LOC (monitor_fault), TRUE);
          IFEND;
        IFEND;

        { NOTE(review): presumably tmp$get_xcb_p locks the AJL when
        { it returns a non-NIL xcb_p, since only the unlock is
        { visible here - confirm.
        jmp$unlock_ajl (ijle_p);
      IFEND;

      tmp$find_next_queued_task (taskid);
    WHILEND;

    IF NOT delete_pt_entry_ok THEN
      RETURN; {----->
    IFEND;

    mmp$delete_pt_entry (pfti, TRUE);
    mmp$relink_page_frame (pfti, mmc$pq_free);

  PROCEND mmp$unlock_rma_list_error;

?? TITLE := 'MMP$BUILD_LOCK_RMA_LIST - Build and lock pages defined by rma list' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
*copyc mmh$build_lock_rma_list
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$build_lock_rma_list
    (    buffer_descriptor: mmt$buffer_descriptor;
         length: ost$byte_count;
         iotype: iot$io_function;
         list_p: ^mmt$rma_list;
         list_length: mmt$rma_list_length;
     VAR status: syt$monitor_status);

{  Builds the real memory address (rma) list for an I/O request and locks
{  the underlying page frames by incrementing their active I/O counts.
{  LIST_P^ receives one rma/length entry per page.  The owning job's
{  active I/O and inhibit-swap accounting is adjusted once the last page
{  has been locked.  If a needed page frame is not assigned (paging and
{  explicit I/O case only), any frames locked so far are unlocked again
{  and the abnormal status mme$page_frame_not_assigned is returned.
{  NOTE(review): local INDEX appears to be declared but never used here.

    VAR
      found: boolean,
      hash_count: 1 .. 32,
      ijle_p: ^jmt$initiated_job_list_entry,
      ijl_ordinal: jmt$ijl_ordinal,
      index: integer,
      io_error: iot$io_error,
      ioid: mmt$io_identifier,
      jf_asid: ost$asid,
      purge_map: boolean,
      list_i: mmt$rma_list_index,
      mf_job_file: boolean,
      osv$keypoint_periodic_lpid: [XREF] integer,
      page_count: integer,
      page_offset: 0 .. 65535,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      pti: integer,
      spde_p: ^jst$swapped_page_descriptor,
      sva: ost$system_virtual_address;

    status.normal := TRUE;
    list_i := 1;

{  Lock the pages depending on format of the buffer descriptor.

    CASE buffer_descriptor.buffer_descriptor_type OF
    = mmc$bd_paging_io, mmc$bd_explicit_io =
      sva := buffer_descriptor.sva;
      page_offset := sva.offset MOD osv$page_size;
      page_count := ((page_offset + length - 1) DIV osv$page_size) + 1;
      IF (list_length < page_count) OR (list_length = 0) THEN
        mtp$error_stop ('MM - lock rmal, list too small');
      IFEND;
      purge_map := FALSE;

{  Lock one page per iteration.  For keypoint I/O the frame index comes
{  from the keypoint control table; otherwise the page is located by
{  hashing its system virtual address into the page table.  An unassigned
{  page frame exits the loop to the cleanup code below.

    /lp/
      WHILE TRUE DO
        IF iotype <> ioc$keypoint_io THEN
          #HASH_SVA (sva, pti, hash_count, found);
          IF NOT found THEN
            EXIT /lp/ {----->
          IFEND;
          pfti := (mmv$pt_p^ [pti].rma * 512) DIV osv$page_size;
        ELSE
          pfti := osv$keypoint_control.cpus [osv$keypoint_periodic_lpid].io_pfti [list_i];
        IFEND;
        pfte_p := ^mmv$pft_p^ [pfti];
        pfte_p^.active_io_count := pfte_p^.active_io_count + 1; { the lock itself
        ?IF mmc$debug_rma_list THEN
          IF list_i = 1 THEN
            ijl_ordinal := pfte_p^.ijl_ordinal;
          ELSEIF ijl_ordinal <> pfte_p^.ijl_ordinal THEN
            mtp$error_stop ('MM - lock rmal, mixed ijl ordinal in lock');
          IFEND;
          IF ((iotype = ioc$write_page) OR (iotype = ioc$write_locked_page)) AND NOT mmv$pt_p^ [pti].m THEN
            mtp$error_stop ('MM - lock rmal, write page error');
          IFEND;
          IF (iotype = ioc$read_page) AND ((pfte_p^.queue_id < mmc$pq_first_valid_in_pt) OR mmv$pt_p^ [pti].
                v) THEN
            mtp$error_stop ('MM - lock rmal, read page error');
          IFEND;
        ?IFEND;

{  Adjust page frame and page table entry state according to the kind of
{  I/O being locked for.

        CASE iotype OF
        = ioc$explicit_read, ioc$read_uft, ioc$read_mass_storage, ioc$explicit_write, ioc$write_mass_storage,
              ioc$initialize_sectors, ioc$explicit_read_no_purge =
          IF pfte_p^.queue_id = mmc$pq_avail THEN
            mmp$relink_page_frame (pfti, pfte_p^.aste_p^.queue_id);
            mmv$pt_p^ [pti].v := TRUE;
          IFEND;
        = ioc$write_to_client =

{  A page already held by a page-in lock cannot be sent to a client now;
{  undo this frame's lock and abandon - the cleanup code below unlocks
{  the frames already locked and returns an abnormal status.

          IF pfte_p^.locked_page = mmc$lp_page_in_lock THEN
            pfte_p^.active_io_count := pfte_p^.active_io_count - 1;
            EXIT /lp/; {----->
          IFEND;
          IF pfte_p^.queue_id = mmc$pq_avail THEN
            mmp$relink_page_frame (pfti, pfte_p^.aste_p^.queue_id);
            mmv$pt_p^ [pti].v := TRUE;
          IFEND;
        = ioc$read_from_client =
          IF NOT mmv$pt_p^ [pti].v THEN
            pfte_p^.locked_page := mmc$lp_page_in_lock;
          IFEND;
        = ioc$no_io = { lock only; no per-page state change
        = ioc$read_page, ioc$read_for_server, ioc$read_ahead_on_server =
          pfte_p^.locked_page := mmc$lp_page_in_lock;
        = ioc$allocate =
          pfte_p^.locked_page := mmc$lp_server_allocate_lock;
        = ioc$write_page, ioc$write_for_server =
          mmv$pt_p^ [pti].m := FALSE;
          IF pfte_p^.queue_id = mmc$pq_avail_modified THEN
            mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon + 1;
          ELSEIF mmv$pt_p^ [pti].v THEN
            purge_map := TRUE;
          IFEND;
        = ioc$write_locked_page =
          pfte_p^.locked_page := mmc$lp_write_protected_lock;
          IF pfte_p^.queue_id = mmc$pq_avail_modified THEN
            mmp$relink_page_frame (pfti, pfte_p^.aste_p^.queue_id);
          ELSEIF mmv$pt_p^ [pti].v THEN
            mmv$pt_p^ [pti].v := FALSE;
            purge_map := TRUE;
          IFEND;
          mmv$pt_p^ [pti].m := FALSE;
        = ioc$keypoint_io =
          mmp$relink_page_frame (pfti, mmc$pq_free);
        ELSE
          mtp$error_stop ('MM - bad IO type lock_rma_list');
        CASEND;
        IF (pfte_p^.queue_id <= mmc$pq_last_reassignable) AND (iotype <> ioc$keypoint_io) THEN
          mtp$error_stop ('MM - Tried to ioc$write_page in AVAIL');
        IFEND;

        list_p^ [list_i].rma := pfti * osv$page_size + page_offset;
        page_count := page_count - 1;

{  Last page of the request: fill in the final (partial) length, charge
{  the job's I/O accounting for the whole list, terminate the list if it
{  is shorter than allocated, purge the page map if required, and return.

        IF page_count <= 0 THEN
          list_p^ [list_i].length := ((buffer_descriptor.sva.offset + length - 1) MOD
                osv$page_size) - page_offset + 1;
          ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);
          ijle_p^.active_io_page_count := ijle_p^.active_io_page_count + list_length;
          ijle_p^.active_io_requests := ijle_p^.active_io_requests + 1;
          IF (pfte_p^.aste_p^.sfid.residence <> gfc$tr_job) OR
                ((iotype <> ioc$write_page) AND (iotype <> ioc$write_locked_page)) THEN
            ijle_p^.inhibit_swap_count := ijle_p^.inhibit_swap_count + list_length;
          IFEND;
          IF list_i < list_length THEN
            list_p^ [list_i + 1].length := 0;
          IFEND;
          IF purge_map THEN
            mmp$purge_all_page_map;
          IFEND;
          RETURN; {<-----}
        IFEND;
        list_p^ [list_i].length := osv$page_size - page_offset;
        sva.offset := sva.offset + osv$page_size;
        page_offset := 0;
        list_i := list_i + 1;
      WHILEND /lp/;


{Control gets here only if a page frame is not assigned to a page that is being locked.  Unlock the pages (if
{any) that have already been locked.

      IF list_i > 1 THEN
        ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);
        ijle_p^.active_io_page_count := ijle_p^.active_io_page_count + list_i - 1;
        ijle_p^.active_io_requests := ijle_p^.active_io_requests + 1;
        IF (pfte_p^.aste_p^.sfid.residence <> gfc$tr_job) OR
              ((iotype <> ioc$write_page) AND (iotype <> ioc$write_locked_page)) THEN
          ijle_p^.inhibit_swap_count := ijle_p^.inhibit_swap_count + list_i - 1;
          mf_job_file := FALSE;
        ELSE
          mf_job_file := TRUE;
        IFEND;
        io_error := ioc$no_error;
        ioid.specified := FALSE;
        mmp$unlock_rma_list (ioc$no_io, list_p, list_i - 1, ioid, mf_job_file, io_error, status);
      IFEND;
      mtp$set_status_abnormal ('MM', mme$page_frame_not_assigned, status);


    = mmc$bd_job_swapping_io =

{  Lock pages for job swapping io.

      ijl_ordinal := buffer_descriptor.ijl_ordinal;
      ijle_p := jmf$ijle_p (ijl_ordinal);
      page_count := list_length;
      ijle_p^.inhibit_swap_count := ijle_p^.inhibit_swap_count + page_count;
      ijle_p^.active_io_page_count := ijle_p^.active_io_page_count + page_count;
      ijle_p^.active_io_requests := ijle_p^.active_io_requests + 1;

{  First call of a swap (spd_index = 0): initialize the swap I/O cursor to
{  start after the job fixed queue and clear any previous swapping error.

      IF ijle_p^.swap_io_control.spd_index = 0 THEN
        ijle_p^.swap_io_control.next_queue_id := SUCC (mmc$pq_job_fixed);
        ijle_p^.swap_io_control.next_pfti := ijle_p^.job_page_queue_list [mmc$pq_job_fixed].link.bkw;
        ijle_p^.swap_io_control.stop_pfti := ijle_p^.swap_io_control.swap_file_descriptor_pfti;
        ijle_p^.swap_data.swapping_io_error := ioc$no_error;
      IFEND;

      jf_asid := ijle_p^.job_fixed_asid;

{  Outer loop advances the cursor to the next non-empty job page queue;
{  the inner loop fills list entries from the current queue until the
{  stop frame is reached or the requested page count is satisfied.

      WHILE TRUE DO
        IF ijle_p^.swap_io_control.next_pfti = ijle_p^.swap_io_control.stop_pfti THEN
          IF ijle_p^.swap_io_control.next_queue_id = mmc$pq_job_fixed THEN
            ijle_p^.swap_io_control.next_pfti := ijle_p^.swap_io_control.swap_file_descriptor_pfti;
          ELSEIF (ijle_p^.swap_io_control.next_queue_id = SUCC (mmc$pq_job_fixed)) AND
                (ijle_p^.swap_io_control.stop_pfti = 0) THEN
            mtp$error_stop ('MM - error in locking swap file pages');
          ELSE
            ijle_p^.swap_io_control.stop_pfti := 0;
            ijle_p^.swap_io_control.next_pfti := ijle_p^.job_page_queue_list
                  [ijle_p^.swap_io_control.next_queue_id].link.bkw;
          IFEND;
          IF ijle_p^.swap_io_control.next_queue_id = UPPERVALUE (mmt$job_page_queue_index) THEN
            ijle_p^.swap_io_control.next_queue_id := mmc$pq_job_fixed;
          ELSE
            ijle_p^.swap_io_control.next_queue_id := SUCC (ijle_p^.swap_io_control.next_queue_id);
          IFEND;
        IFEND;

        WHILE ijle_p^.swap_io_control.next_pfti <> ijle_p^.swap_io_control.stop_pfti DO
          pfte_p := ^mmv$pft_p^ [ijle_p^.swap_io_control.next_pfti];
          ?IF mmc$debug_rma_list THEN
            IF ijl_ordinal <> pfte_p^.ijl_ordinal THEN
              mtp$error_stop ('MM - mixed ijl ordinal in swap');
            IFEND;
          ?IFEND;

{  On swapout, snapshot the frame's pft, page table and ast entries into
{  the swapped page descriptor so the swapper can rebuild them on swapin.

          IF iotype = ioc$swap_out THEN
            spde_p := ^ijle_p^.sfd_p^.swapped_page_descriptors [ijle_p^.swap_io_control.spd_index];
            spde_p^.pft_entry := pfte_p^;
            spde_p^.page_table_entry := mmv$pt_p^ [pfte_p^.pti];
            spde_p^.ast_entry := pfte_p^.aste_p^;

{ The entry_updated field in the swapped page descriptor is set to TRUE for job fixed pages, FALSE for
{ all other pages.  Setting the field helps reset_swapped_job_mm_tables in swapper to differentiate
{ between job fixed pages and other fixed pages when checking/changing ASIDs.

            spde_p^.entry_updated := (spde_p^.pft_entry.sva.asid = jf_asid);
          IFEND;
          list_p^ [list_i].rma := ijle_p^.swap_io_control.next_pfti * osv$page_size;
          list_p^ [list_i].length := osv$page_size;

          ijle_p^.swap_io_control.spd_index := ijle_p^.swap_io_control.spd_index + 1;
          ijle_p^.swap_io_control.next_pfti := pfte_p^.link.bkw;

          page_count := page_count - 1;
          IF page_count = 0 THEN
            RETURN; {----->
          IFEND;
          list_i := list_i + 1;
        WHILEND;

      WHILEND;
    ELSE
      mtp$error_stop ('MM - bad bufr desc in lock_rma_list');
    CASEND;

    mmp$check_queues;

  PROCEND mmp$build_lock_rma_list;

?? TITLE := 'MMP$BUILD_LOCK_RMA_LIST_TAPE - Build and lock pages defined by tape request' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{ Purpose:
{   This procedure is a build_lock_rma_list routine that is customized for NOS/VE tape I/O.
{   This allows all the pages associated with a tape request to be locked in one call from
{   iom$tape_queue_manager_mtr.  This replaces the use of mmp$build_lock_rma_list, which
{   required up to 30 calls for write requests and 60 calls for read requests.
{
{ Notes:
{   Although buffer_descriptor is not a parameter to this routine, the buffer_descriptor_type
{   is assumed to be mmc$bd_explicit_io.  Tape I/O does not use anything else.  If any changes
{   are made to mmp$build_lock_rma_list relating to mmc$bd_explicit_io, those changes may
{   also be needed here.
{
{   The only io_type values ever used for locking pages are ioc$explicit_read and
{   ioc$explicit_write.  When the pages are unlocked, io_type can be ioc$explicit_read_no_purge
{   if cache purge is not required.
{
{   The parameter tape_request_p points to the wired tape request for which pages need to
{   be locked and rma lists built.  The request is both an input and output parameter.
{   For writes and reads, the data buffers for the request are locked.  For reads the
{   first store transfer count buffer is also locked.  Since all transfer count buffers must
{   be on the same memory page, it is not necessary to lock more than one.
{
{   If page frames are not assigned to the entire range of addresses specified in the
{   request, no frames are locked and an error code will be returned.
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$build_lock_rma_list_tape
    (    tape_request_p: ^iot$wired_tape_request; {input/output
     VAR status: syt$monitor_status);

{  See the header comment above.  Locks all pages of a wired tape request
{  and builds the rma lists and tape commands in one call.  On any failure
{  the frames locked so far are unlocked and an abnormal status returned.

    VAR
      command_index: iot$tape_command_index,
      found: boolean,
      hash_count: 1 .. 32,
      ignore_status: syt$monitor_status,
      ijle_p: ^jmt$initiated_job_list_entry,
      ijl_ordinal: jmt$ijl_ordinal,
      io_error: iot$io_error,
      ioid: mmt$io_identifier,
      length: ost$byte_count,
      list_i: mmt$rma_list_index,
      list_p: ^mmt$rma_list,
      loop_count: 1 .. 2,
      loop_count_index: 1 .. 2,
      page_count: integer,
      page_offset: 0 .. 65535,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      pti: integer,
      pva: ^cell,
      rma: integer,
      sva: ost$system_virtual_address,
      total_list_entries: mmt$rma_list_index;

    status.normal := TRUE;
    total_list_entries := 1;
    list_p := #LOC (tape_request_p^.wired_command_heap_p^.rma_list [1]);
    loop_count := 1;
    IF tape_request_p^.io_type = ioc$explicit_read THEN
      loop_count := 2; { must lock data buffer(s) and store transfer count buffer
    IFEND;

{  Pass 1 locks the data buffers of every data command; pass 2 (reads
{  only) locks the first store transfer count buffer.

    FOR loop_count_index := 1 TO loop_count DO

    /lock_loop/
      FOR command_index := 1 TO tape_request_p^.no_of_data_commands DO
        list_i := 1;

{  Select the buffer address and length for this command/pass.

        IF tape_request_p^.io_type = ioc$explicit_read THEN
          IF loop_count_index = 1 THEN {data buffer
            length := tape_request_p^.max_input_count;
            pva := tape_request_p^.wired_read_description_p^ [command_index].buffer_area;
          ELSE {store transfer count buffer
            length := 8;
            pva := tape_request_p^.wired_read_description_p^ [command_index].block_transfer_length;
          IFEND;
        ELSE {ioc$explicit_write
          length := tape_request_p^.wired_write_description_p^ [command_index].transfer_length;
          pva := tape_request_p^.wired_write_description_p^ [command_index].buffer_area;
        IFEND;

{  Translate the buffer's pva to an sva; on failure, unlock what has been
{  locked so far and return the translation status.

        mmp$xtask_pva_to_sva (pva, sva, status);
        IF NOT status.normal THEN
          IF total_list_entries > 1 THEN
            ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);
            ijle_p^.active_io_requests := ijle_p^.active_io_requests + 1;
            io_error := ioc$no_error;
            ioid.specified := FALSE;
            mmp$unlock_rma_list (ioc$no_io, list_p, total_list_entries - 1, ioid, {MF_JOB_FILE} FALSE,
                  io_error, ignore_status);
          IFEND;
          RETURN; {----->
        IFEND;

        page_offset := sva.offset MOD osv$page_size;
        page_count := ((page_offset + length - 1) DIV osv$page_size) + 1;
        IF page_count + total_list_entries - 1 > tape_request_p^.allocated_address_pair_count THEN
          mtp$error_stop ('MM - tape lock rma list, list too small');
        IFEND;

{  Lock each page of this buffer and append its rma/length pair to the
{  request's rma list.

        REPEAT
          #HASH_SVA (sva, pti, hash_count, found);
          IF NOT found THEN

{ A page frame is not assigned to a page that is being locked.
{ Unlock the pages (if any) that have already been locked and return the error
{ mme$page_frame_not_assigned to iom$tape_queue_manager_mtr, who will in turn return to job mode
{ where all the pages will be touched and the monitor request reissued.

            IF total_list_entries > 1 THEN
              ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);
              ijle_p^.active_io_requests := ijle_p^.active_io_requests + 1;
              IF list_i > 1 THEN
                ijle_p^.inhibit_swap_count := ijle_p^.inhibit_swap_count + list_i - 1;
                ijle_p^.active_io_page_count := ijle_p^.active_io_page_count + list_i - 1;
              IFEND;
              io_error := ioc$no_error;
              mmp$unlock_rma_list (ioc$no_io, list_p, total_list_entries - 1, ioid, {MF_JOB_FILE} FALSE,
                    io_error, status);
              IF NOT status.normal THEN
                mtp$error_stop ('MM - lock tape rma list, unlock error');
              IFEND;
            IFEND;
            mtp$set_status_abnormal ('MM', mme$page_frame_not_assigned, status);
            RETURN; {<-----}
          IFEND;

{  Lock the frame and make it non-reassignable if it was available.

          pfti := (mmv$pt_p^ [pti].rma * 512) DIV osv$page_size;
          pfte_p := ^mmv$pft_p^ [pfti];
          pfte_p^.active_io_count := pfte_p^.active_io_count + 1;
          IF (pfte_p^.queue_id = mmc$pq_avail) THEN
            mmp$relink_page_frame (pfti, pfte_p^.aste_p^.queue_id);
            mmv$pt_p^ [pti].v := TRUE;
          IFEND;

          ?IF mmc$debug_rma_list THEN
            IF loop_count_index = 1 THEN
              IF total_list_entries = 1 THEN
                ijl_ordinal := pfte_p^.ijl_ordinal;
              ELSEIF ijl_ordinal <> pfte_p^.ijl_ordinal THEN
                mtp$error_stop ('MM - tape lock rma list, mixed ijl ordinal in lock');
              IFEND;
            IFEND;
          ?IFEND;

          list_p^ [total_list_entries].rma := pfti * osv$page_size + page_offset;
          page_count := page_count - 1;
          IF page_count > 0 THEN
            ?IF mmc$debug_rma_list THEN
              IF loop_count_index = 2 THEN
                mtp$error_stop ('MM - tape lock rma list, transfer count buffer more than 1 page');
              IFEND;
            ?IFEND;
            list_p^ [total_list_entries].length := osv$page_size - page_offset;
            sva.offset := sva.offset + osv$page_size;
            page_offset := 0;
            list_i := list_i + 1;
          ELSE

{  Last page of this buffer: partial final length, then charge the job's
{  page counts for all pages of the buffer.

            list_p^ [total_list_entries].length := ((sva.offset + length - 1) MOD osv$page_size) -
                  page_offset + 1;
            ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);
            ijle_p^.inhibit_swap_count := ijle_p^.inhibit_swap_count + list_i;
            ijle_p^.active_io_page_count := ijle_p^.active_io_page_count + list_i;
          IFEND;
          total_list_entries := total_list_entries + 1;
        UNTIL page_count <= 0;

{  Point the tape command at the rma list entries just built.  RMA is the
{  real address of this buffer's first list entry; the command length is
{  list_i * 8 (presumably 8 bytes per rma/length entry - confirm against
{  mmt$rma_list).  Reads use command slot 2i for data and 2i+1 for the
{  transfer count; writes use slot 2i+1.

        #real_memory_address (^list_p^ [total_list_entries - list_i], rma);
        IF tape_request_p^.io_type = ioc$explicit_read THEN
          IF loop_count_index = 1 THEN
            tape_request_p^.request.tape_command [command_index * 2].address := rma;
            tape_request_p^.request.tape_command [command_index * 2].length := list_i * 8;
          ELSE
            tape_request_p^.request.tape_command [command_index * 2 + 1].
                  address := list_p^ [total_list_entries - 1].rma;
            EXIT /lock_loop/; {----->
          IFEND;
        ELSE { ioc$explicit_write
          tape_request_p^.request.tape_command [command_index * 2 + 1].address := rma;
          tape_request_p^.request.tape_command [command_index * 2 + 1].length := list_i * 8;
        IFEND;
      FOREND /lock_loop/;

{ Increment active_io_requests in the ijle entry that was used in the preceding trip thru
{ the above FOR loop.  Since all pages must be in the same segment, the ijle pointer is
{ the correct one.
{ The ijle pointer is saved in the wired tape request for use by iom$tape_queue_manager_mtr
{ to increment/decrement the active_cart_tape_write field of the ijl entry.

      ijle_p^.active_io_requests := ijle_p^.active_io_requests + 1;
      tape_request_p^.ijle_p := ijle_p;
    FOREND;

{ The following check is necessary in the case of error recovery.  In that situation,
{ the number of blocks being retried may be less than the original and therefore, the
{ total_list_entries will be less than the amount allocated.  For performance reasons,
{ a new allocated_address_pair_count is not re-calculated before retrying the IO.

    IF total_list_entries - 1 < tape_request_p^.allocated_address_pair_count THEN
      list_p^ [total_list_entries].length := 0;
    IFEND;

    tape_request_p^.list_p := list_p;
    tape_request_p^.address_pair_count := total_list_entries - 1;

  PROCEND mmp$build_lock_rma_list_tape;

?? TITLE := 'MMP$REMOVE_JWS_TO_SHARED_PAGES' ??
?? EJECT ??
{-----------------------------------------------------------------------------
{ If pages of a segment were being kept in a jws, but are now going to be kept
{ in the global queue, it is necessary to remove all pages of the segment from
{ the jws.  This procedure is called to remove the pages when a job shared file
{ segment is opened and more than one user has the file attached.
{ The swap state of the job the pages are being removed from (which is not the
{ job that issued the monitor request) must be considered.
{ If the job is executing or in a "safe" swap status, pages will be removed.  If
{ swapout I/O is active, the monitor request will be reissued and a task switch
{ forced (the request will have to wait until the I/O has completed).  If the
{ job is swapped completely out, a delayed swapin bit will be set so that the
{ pages will be removed when the job swaps back in.
{ NOTE:  This request is usually issued because a second user has attached a
{ read only file and the file is now being shared.  The request is also used
{ to remove pages from the JWS when the flush_pages request for a detach_file
{ fails.  When that happens the FDE for the file is turned into a global_shared
{ file.  The AST entry and all pages must reflect that the file is shared.
{ ALSO NOTE:  In the flush-detach fail case, the job which has the pages in
{ its working set cannot be swapped out, because it is issuing the monitor
{ request.
{-----------------------------------------------------------------------------

  PROCEDURE mmp$remove_jws_to_shared_pages
    (    fde_p: gft$locked_file_desc_entry_p;
         cst_p: ^ost$cpu_state_table;
     VAR rb: mmt$rb_ring1_segment_request);

{  See the header comment above.  Moves a segment's pages out of a job
{  working set when the segment transitions to shared.
{
{    fde_p - locked file descriptor entry of the file going shared.
{    cst_p - cpu state table of the requesting processor.
{    rb    - the ring 1 segment request; rb.status is set normal here.
{
{  NOTE(review): locals I, INHIBIT_IO, NEXT_PFTI and QUEUE_ID appear to be
{  declared but never used in this procedure.

    VAR
      asid: ost$asid,
      aste_p: ^mmt$active_segment_table_entry,
      count_removed: integer,
      dsw_job_shared_asid_changed: [STATIC] jmt$delayed_swapin_work := [jmc$dsw_job_shared_asid_changed],
      ijle_p: ^jmt$initiated_job_list_entry,
      i: integer,
      inhibit_io: boolean,
      jws_ijl_ordinal: jmt$ijl_ordinal,
      next_pfti: mmt$page_frame_index,
      pfti: mmt$page_frame_index,
      pfte_p: ^mmt$page_frame_table_entry,
      queue_id: mmt$page_frame_queue_id,
      sdte_p: ^mmt$segment_descriptor,
      system_ijle_p: ^jmt$initiated_job_list_entry;

    rb.status.normal := TRUE;
    aste_p := ^mmv$ast_p^ [fde_p^.asti];

{ The file may already be shared.  In that case nothing needs to be done.

    IF (aste_p^.queue_id > mmc$pq_shared_last) THEN
      jws_ijl_ordinal := aste_p^.ijl_ordinal;
      IF NOT (jmp$ijl_block_valid (jws_ijl_ordinal)) THEN
        RETURN; {----->
      IFEND;

      ijle_p := jmf$ijle_p (jws_ijl_ordinal);

{ If swapout I/O is active, cause the task to cycle until the swapout I/O has completed.
{ Do not change any AST fields now if the request has to be reissued.

      IF (ijle_p^.swap_status >= jmc$iss_initiate_swapout_io) AND
            (ijle_p^.swap_status <= jmc$iss_swapout_io_complete) THEN

        tmp$reissue_monitor_request;
        tmp$cause_task_switch;

      ELSE

{ The pages of this segment were being kept in a JWS queue but are now going to be kept in
{ one of the shared queues, all pages must be removed from the JWS.  The procedure called
{ will remove the pages immediately if the job is addressable; if the job is swapped out,
{ a delayed swapin bit will be set in the job's ijl entry and the pages will be removed
{ when the job swaps in.  The segment must be changed to reflect that it is now being
{ shared before the call to the remove procedure; the ijlo passed into the remove procedure
{ must be the ijlo of the job that we need to remove the pages from.
{ First, determine which Shared Queue will be used.  NOTE:  A segment number of zero indicates
{ that this request was issued because a flush pages on detach_file failed.  In that case, do
{ not store the asid/asti in the segment table; use shared_other queue.

        IF rb.server_file THEN
          IF (fde_p^.queue_ordinal <> 0) AND (fde_p^.queue_ordinal <= mmv$last_active_shared_queue) THEN
            aste_p^.queue_id := fde_p^.queue_ordinal;
          ELSE
            aste_p^.queue_id := mmc$pq_shared_file_server;
          IFEND;
        ELSEIF rb.segment_number <> 0 THEN
          sdte_p := mmp$get_sdt_entry_p (cst_p^.xcb_p, rb.segment_number);
          sdte_p^.asti := fde_p^.asti;
          mmp$asid (fde_p^.asti, asid);
          sdte_p^.ste.asid := asid;
          aste_p^.queue_id := mmp$determine_shared_queue_id (fde_p, sdte_p);
        ELSE
          aste_p^.queue_id := mmc$pq_shared_other;
        IFEND;

{  The segment now belongs to the system job.

        aste_p^.ijl_ordinal := jmv$system_ijl_ordinal;
        system_ijle_p := jmf$ijle_p (jmv$system_ijl_ordinal);

{ Scan the pages in memory belonging to the segment.  Store pfti's of working set pages so they
{ can be removed.  Adjust I/O counts for pages in the available modified queue.

        count_removed := 0;
        mmp$reset_store_pfti;
        pfti := aste_p^.pft_link.fwd;
        WHILE pfti <> 0 DO
          pfte_p := ^mmv$pft_p^ [pfti];
          IF pfte_p^.queue_id = mmc$pq_job_working_set THEN
            mmp$store_pfti (pfti);
          ELSEIF (pfte_p^.queue_id = mmc$pq_avail_modified) OR
                (pfte_p^.queue_id = mmc$pq_shared_io_error) THEN

{  Transfer this frame's outstanding I/O accounting from the job to the
{  system job.

            ijle_p^.inhibit_swap_count := ijle_p^.inhibit_swap_count - pfte_p^.active_io_count;
            ijle_p^.active_io_page_count := ijle_p^.active_io_page_count - pfte_p^.active_io_count;
            system_ijle_p^.inhibit_swap_count := system_ijle_p^.inhibit_swap_count + pfte_p^.active_io_count;
            system_ijle_p^.active_io_page_count := system_ijle_p^.active_io_page_count +
                  pfte_p^.active_io_count;
            pfte_p^.ijl_ordinal := jmv$system_ijl_ordinal;
          IFEND;
          pfti := mmv$pft_p^ [pfti].segment_link.fwd;
        WHILEND;

{  If the job is addressable (not in an active swap transition), relink
{  the stored working set pages into the segment's new shared queue now;
{  otherwise mark the job for delayed swapin work.

        IF (ijle_p^.swap_status <= jmc$iss_swapped_io_cannot_init) OR
              (ijle_p^.swap_status = jmc$iss_swapped_io_complete) THEN

          mmp$fetch_pfti_array_size (count_removed);
          IF count_removed > 0 THEN
            mmp$reset_find_next_pfti (pfti);
            WHILE pfti <> 0 DO
              pfte_p := ^mmv$pft_p^ [pfti];
              mmp$relink_page_frame (pfti, pfte_p^.aste_p^.queue_id);
              ijle_p^.inhibit_swap_count := ijle_p^.inhibit_swap_count - pfte_p^.active_io_count;
              ijle_p^.active_io_page_count := ijle_p^.active_io_page_count - pfte_p^.active_io_count;
              system_ijle_p^.inhibit_swap_count := system_ijle_p^.inhibit_swap_count +
                    pfte_p^.active_io_count;
              system_ijle_p^.active_io_page_count := system_ijle_p^.active_io_page_count +
                    pfte_p^.active_io_count;
              pfte_p^.ijl_ordinal := jmv$system_ijl_ordinal;
              mmp$find_next_pfti (pfti);
            WHILEND;

{  If the job is partially or fully swapped, its swapped page accounting
{  must reflect the pages just removed.

            IF ((ijle_p^.swap_status >= jmc$iss_job_idle_tasks_complete) AND
                  (ijle_p^.swap_status <= jmc$iss_swapped_io_cannot_init)) OR
                  (ijle_p^.swap_status = jmc$iss_swapped_io_complete) THEN
              jsp$recalculate_swapped_pages (ijle_p, count_removed);
            IFEND;
          IFEND;
        ELSE
          ijle_p^.delayed_swapin_work := ijle_p^.delayed_swapin_work + dsw_job_shared_asid_changed;
        IFEND;
      IFEND;
    IFEND;

  PROCEND mmp$remove_jws_to_shared_pages;


?? TITLE := '[XDCL] mmp$remove_swapped_shared_pages', EJECT ??
{-----------------------------------------------------------------------------
{ This procedure is called on swapin to remove job shared pages from the working
{ set of a job that is swapping in.  The job was swapped out when a job
{ shared file was attached by a second job, causing a working-set-to-shared
{ transition.  At the time of the transition a delayed_swapin_work indicator
{ was set for the job.  The job's entire working set must be scanned for pages
{ that should now be in the shared queue.
{-----------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$remove_swapped_shared_pages
    (    ijle_p: ^jmt$initiated_job_list_entry);

    VAR
      frame_i: mmt$page_frame_index,
      frame_p: ^mmt$page_frame_table_entry,
      io_pages: integer,
      succ_frame_i: mmt$page_frame_index,
      sys_ijle_p: ^jmt$initiated_job_list_entry;

    { Walk the job_working_set queue along its bkw links.  Every frame
    { whose active segment table entry now names one of the shared queues
    { is relinked into that shared queue, its outstanding I/O accounting
    { is transferred from this job to the system job, and its ownership
    { changed to the system ijl ordinal.

    sys_ijle_p := jmf$ijle_p (jmv$system_ijl_ordinal);

    frame_i := ijle_p^.job_page_queue_list [mmc$pq_job_working_set].link.bkw;
    WHILE frame_i <> 0 DO
      frame_p := ^mmv$pft_p^ [frame_i];
      succ_frame_i := frame_p^.link.bkw; { fetched before the relink unlinks the frame
      IF (frame_p^.aste_p^.queue_id >= mmc$pq_shared_first) AND
            (frame_p^.aste_p^.queue_id <= mmc$pq_shared_last) THEN
        mmp$relink_page_frame (frame_i, frame_p^.aste_p^.queue_id);
        io_pages := frame_p^.active_io_count;
        ijle_p^.inhibit_swap_count := ijle_p^.inhibit_swap_count - io_pages;
        ijle_p^.active_io_page_count := ijle_p^.active_io_page_count - io_pages;
        sys_ijle_p^.inhibit_swap_count := sys_ijle_p^.inhibit_swap_count + io_pages;
        sys_ijle_p^.active_io_page_count := sys_ijle_p^.active_io_page_count + io_pages;
        frame_p^.ijl_ordinal := jmv$system_ijl_ordinal;
      IFEND;
      frame_i := succ_frame_i;
    WHILEND;

  PROCEND mmp$remove_swapped_shared_pages;

?? TITLE := 'MMP$REMOVE_DETACHED_PAGES', EJECT ??
{---------------------------------------------------------------------
{ This procedure removes job working set pages of a file being detached
{ from the working set of the job doing the detach.
{---------------------------------------------------------------------

  PROCEDURE mmp$remove_detached_pages
    (    sva: ost$system_virtual_address;
         aste_p: ^mmt$active_segment_table_entry;
         ijl_ordinal: jmt$ijl_ordinal);

    VAR
      frame_index: mmt$page_frame_index,
      frame_p: ^mmt$page_frame_table_entry;

    { Visit every page frame of the segment starting at SVA, using a
    { maximal length (7ffffff0(16)) so all remaining pages are covered.
    { Frames validly in the page table are taken out of the detaching
    { job's working set: a frame held by a page-in lock is deleted from
    { the page table and freed outright; any other frame is removed from
    { the job.  The lock indicator is cleared in either case.

    mmp$initialize_find_next_pfti (sva, 7ffffff0(16), include_partial_pages, psc_nominal_queue, aste_p, frame_index);
    WHILE frame_index <> 0 DO
      frame_p := ^mmv$pft_p^ [frame_index];
      IF (frame_p^.queue_id > mmc$pq_first_valid_in_pt) THEN
        IF frame_p^.locked_page = mmc$lp_page_in_lock THEN
          mmp$delete_pt_entry (frame_index, TRUE);
          mmp$relink_page_frame (frame_index, mmc$pq_free);
        ELSE
          mmp$remove_page_from_job (frame_index);
        IFEND;
        frame_p^.locked_page := mmc$lp_not_locked;
      IFEND;
      mmp$find_next_pfti (frame_index);
    WHILEND;

  PROCEND mmp$remove_detached_pages;

?? TITLE := 'MMP$REMOVE_PAGES_WORKING_SET' ??
?? EJECT ??
{---------------------------------------------------------------------
{This procedure removes pages from a job working set or shared working set
{All pages totally or partially contained between SVA to SVA + LENGTH are
{moved to the AVAILABLE or AVAILABLE-MODIFIED queue.
{---------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$remove_pages_working_set
    (    sva: ost$system_virtual_address;
         length: ost$segment_length;
         aste_p: ^mmt$active_segment_table_entry;
     VAR rcount: integer);

{  Removes unlocked pages in the range SVA .. SVA + LENGTH from the
{  calling job's working set (see header above).  RCOUNT is presumably
{  the number of pages removed, as returned by mmp$remove_pages_from_jws
{  - confirm against that procedure.  NOTE(review): RCOUNT is left
{  unchanged when the ownership check below causes an early return.

    VAR
      cst_p: ^ost$cpu_state_table,
      fcount: integer, { output of mmp$remove_pages_from_jws; discarded here
      mcount: integer, { output of mmp$remove_pages_from_jws; discarded here
      pfti: mmt$page_frame_index;

{ #### CHECK THIS ALGORITHM CAREFULLY.  Consider restricted attach/detach, job shared files, restricted
{ #### attach/detach of global shared files.

    cst_p := mtf$cst_p ();

{ Do not remove pages from the shared queue, nor from another job's working set.

    IF (aste_p^.queue_id <> mmc$pq_job_working_set) OR (aste_p^.ijl_ordinal <> cst_p^.ijl_ordinal) THEN
      RETURN; {----->
    IFEND;

    mmp$initialize_find_next_pfti (sva, length, include_partial_pages, psc_nominal_queue, aste_p, pfti);
    IF pfti = 0 THEN
      rcount := 0;
      RETURN {----->
    IFEND;

{ Delete locked pages from the array of pages to be removed.
{ ### There should be a better way of doing this than scanning the whole array.

    WHILE pfti <> 0 DO

      IF mmv$pft_p^ [pfti].locked_page <> mmc$lp_not_locked THEN
        mmp$delete_last_pfti_from_array;
      IFEND;
      mmp$find_next_pfti (pfti);
    WHILEND;

{All calls are from mtr requests. If it is not necessary, we will not do a dequeue to the overrun q.
    mmp$remove_pages_from_jws (mmc$pq_avail_modified, cst_p^.ijle_p, FALSE {= overrun Q} , fcount, mcount,
          rcount);

  PROCEND mmp$remove_pages_working_set;

?? TITLE := 'MMP$MM_FREE_PAGES' ??
?? EJECT ??
{---------------------------------------------------------------------
{This procedure frees (moves the page frames to the free page queue) all
{pages TOTALLY contained in the range SVA to SVA + LENGTH.
{NOTE that modified pages are NOT written to disk.
{If ASID is to be freed, map is not purged prior to deleting page frames.
{---------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$mm_free_pages
    (    sva: ost$system_virtual_address;     { start of the range to free }
         length: ost$segment_length;     { only pages TOTALLY contained in the range are freed }
         aste_p: ^mmt$active_segment_table_entry;     { ASTE of the segment }
         free_asid: boolean;     { TRUE: also free the ASID, and skip the pre-delete map purge }
     VAR count: integer);     { number of page frames moved to the free queue }

    VAR
      contiguous_pages: integer,
      first_pfti: mmt$page_frame_index,
      ijl_p: ^jmt$initiated_job_list_entry,
      pfti: mmt$page_frame_index;

    count := 0;

    mmp$initialize_find_next_pfti (sva, length, exclude_partial_pages, psc_all, aste_p, pfti);

{ When the ASID survives, the pages must first be made invisible to the CPUs: clear the valid
{ bit on every page in the range, purge all caches and maps so the clears take effect, then
{ reset the iterator and delete the entries.  When the ASID itself is being freed the purge
{ is unnecessary and the deletion loop runs directly on the first pass.

    IF NOT free_asid THEN
      WHILE pfti <> 0 DO
        mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := FALSE;
        mmp$find_next_pfti (pfti);
      WHILEND;
      mmp$purge_all_cache_map;
      mmp$reset_find_next_pfti (pfti);
    IFEND;

{ Delete each page table entry and return its frame to the free queue.
{ Modified pages are NOT written to disk.

    WHILE pfti <> 0 DO
      mmp$delete_pt_entry (pfti, TRUE);
      mmp$relink_page_frame (pfti, mmc$pq_free);
      count := count + 1;
      mmp$find_next_pfti (pfti);
    WHILEND;

{ For a job-fixed segment, recount the contiguous pages still at the tail of the job-fixed
{ queue (frames with a nonzero SVA offset, walked backward from the queue end) and adjust
{ the job and system contiguous-page accounting.

    IF aste_p^.queue_id = mmc$pq_job_fixed THEN
      ijl_p := jmf$ijle_p (aste_p^.ijl_ordinal);
      contiguous_pages := 0;
      first_pfti := ijl_p^.job_page_queue_list [mmc$pq_job_fixed].link.bkw;
      IF (mmv$pft_p^ [first_pfti].sva.offset <> 0) THEN
        WHILE mmv$pft_p^ [first_pfti].sva.offset <> 0 DO
          contiguous_pages := contiguous_pages + 1;
          first_pfti := mmv$pft_p^ [first_pfti].link.bkw;
        WHILEND;
      IFEND;
      ijl_p^.job_fixed_contiguous_pages := contiguous_pages;
      mmv$total_contig_pages_assigned := mmv$total_contig_pages_assigned - contiguous_pages;
    IFEND;
    IF free_asid THEN
      mmp$free_asid (sva.asid, aste_p);
    IFEND;

  PROCEND mmp$mm_free_pages;

?? TITLE := 'MMP$MM_WRITE_MODIFIED_PAGES' ??
?? EJECT ??
{---------------------------------------------------------------------
{Name:
{  mmp$mm_write_modified_pages
{Purpose:
{  This procedure writes all modified pages within a specified SVA range
{  to the backing file for the segment.  All pages totally or partially
{  contained in the range of SVA to SVA + LENGTH are written.
{
{  If init_new_io is FALSE then the monitor request has been reissued
{  and only status will be returned.   Job mode sets init_new_io to TRUE
{  in the RB and mmp$process_wmp_status or mmp$mtr_write will set it
{  to FALSE if the wait option is selected.
{---------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$mm_write_modified_pages
    (    sva: ost$system_virtual_address;     { start of the range to write }
         length: ost$segment_length;     { length of the range; partial pages are included }
         fde_p: gft$locked_file_desc_entry_p;     { locked FDE of the backing file }
         aste_p: ^mmt$active_segment_table_entry;     { ASTE of the segment }
         iotype: iot$io_function;     { io function used for each page write }
         init_new_io: boolean;     { TRUE: initiate writes; FALSE: reissued request, status only }
         remove_page: boolean;     { TRUE only for mmc$sr1_detach_file -- also remove pages from the JWS }
         io_id: mmt$io_identifier;     { optional io identifier; when specified, io_count is maintained }
     VAR io_count: mmt$active_io_count;     { number of writes initiated under io_id }
     VAR io_already_active: boolean;     { TRUE if an unmodified page in range still had io active }
     VAR last_written_pfti: mmt$page_frame_index;     { last frame written (or found with active io) }
     VAR wmp_status: mmt$write_modified_pages_status);     { overall completion status }

    VAR
      cst_p: ^ost$cpu_state_table,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      write_status: mmt$write_page_to_disk_status;

    io_count := 0;
    io_already_active := FALSE;
    wmp_status := mmc$wmp_io_complete;
    last_written_pfti := 0;

    cst_p := mtf$cst_p ();
    mmp$initialize_find_next_pfti (sva, length, include_partial_pages, psc_all_except_avail, aste_p, pfti);

    IF init_new_io THEN

{ First pass of a new request: initiate a write for every modified page in the range.

    /write_loop/
      WHILE pfti <> 0 DO
        pfte_p := ^mmv$pft_p^ [pfti];
        IF mmv$pt_p^ [pfte_p^.pti].m THEN

          mmp$write_page_to_disk (fde_p, pfti, iotype, io_id, mmv$multi_page_write, write_status);
          IF write_status = ws_ok THEN
            last_written_pfti := pfti;

            IF io_id.specified THEN
              io_count := io_count + 1;
            IFEND;

          ELSEIF write_status = ws_disk_flaws THEN
            wmp_status := mmc$wmp_io_errors;
          ELSEIF write_status = ws_volume_unavailable THEN
            wmp_status := mmc$wmp_volume_unavailable;
          ELSEIF write_status = ws_server_terminated THEN
            wmp_status := mmc$wmp_server_terminated;
          ELSE

{ An initiation reject stops the scan; mmp$process_wmp_status will reissue the request.

            wmp_status := mmc$wmp_io_initiation_reject;
            EXIT /write_loop/; {----->
          IFEND;

        ELSEIF (pfte_p^.active_io_count <> 0) AND (last_written_pfti = 0) THEN

{ An unmodified page with io still active: remember it so the caller can wait on it.

          last_written_pfti := pfti;
          IF io_id.specified THEN
            io_already_active := TRUE;
          IFEND;
        IFEND;

        { REMOVE_PAGE is true only if the request code is mmc$sr1_detach_file.
        { When a permanent file is detached, if the pages are being kept in a job's working set
        { all those pages must be removed.
{ ###!!   The ijl ordinal check is needed to prevent a "restricted" attach/detach (access mode = none)
        { from removing pages from the working set of the job that really has the file attached.
        { (There is a timing problem--if the real detach and a new shared attach occurs before the
        { restricted detach, the queue status will incorrectly remain working set for a shared file.)
        { When perm files cleans up the restricted attach, the ijl ordinal check can probably be removed.

        IF remove_page
{     } AND (pfte_p^.queue_id > mmc$pq_first_valid_in_pt)
{     } AND NOT (mmv$pt_p^ [pfte_p^.pti].m)
{     } AND (mmv$shared_pages_in_jws)
{     } AND (aste_p^.queue_id = mmc$pq_job_working_set)
{     } AND (pfte_p^.ijl_ordinal = cst_p^.ijl_ordinal) THEN

          IF (pfte_p^.locked_page = mmc$lp_page_in_lock) OR (pfte_p^.locked_page =
                mmc$lp_server_allocate_lock) THEN
            mmp$delete_pt_entry (pfti, TRUE);
            mmp$relink_page_frame (pfti, mmc$pq_free);
          ELSE
            mmp$remove_page_from_job (pfti);
          IFEND;
          pfte_p^.locked_page := mmc$lp_not_locked;
        IFEND;

        mmp$find_next_pfti (pfti);
      WHILEND /write_loop/;
    ELSE

{ Reissued request: no new io is started; only collect status from the pages in range.

    /status_loop/
      WHILE pfti <> 0 DO
        pfte_p := ^mmv$pft_p^ [pfti];
        IF mmv$pt_p^ [pfte_p^.pti].m THEN

{ A modified page on an io error queue: distinguish a down/unavailable volume from a real
{ write error.

          IF (pfte_p^.queue_id = mmc$pq_job_io_error) OR (pfte_p^.queue_id = mmc$pq_shared_io_error) THEN
            IF (pfte_p^.io_error <> ioc$unrecovered_error_unit_down) AND
                  (pfte_p^.io_error <> ioc$unit_down_on_init) THEN
              wmp_status := mmc$wmp_io_errors;
            ELSE
              wmp_status := mmc$wmp_volume_unavailable;
            IFEND;
          IFEND;

        ELSEIF (pfte_p^.active_io_count <> 0) THEN
          last_written_pfti := pfti;
          IF io_id.specified THEN
            io_already_active := TRUE;
          IFEND;
        IFEND;

        mmp$find_next_pfti (pfti);
      WHILEND /status_loop/;
    IFEND;

{ Promote the status to io_active when writes were started (or are still in flight) and no
{ stronger error status was already recorded.

    IF io_id.specified THEN
      IF (io_count > 0) AND (wmp_status = mmc$wmp_io_complete) THEN
        wmp_status := mmc$wmp_io_active;
      IFEND;
    ELSEIF (last_written_pfti <> 0) AND (wmp_status <> mmc$wmp_volume_unavailable) AND
          (wmp_status <> mmc$wmp_io_initiation_reject) THEN
      wmp_status := mmc$wmp_io_active;
    IFEND;

  PROCEND mmp$mm_write_modified_pages;

?? TITLE := 'MMP$PROCESS_WMP_STATUS', EJECT ??

{----------------------------------------------------------------------------------------
{Name:
{  mmp$process_wmp_status
{Purpose:
{  This procedure processes the status returned by mmp$mm_write_modified_pages.  If
{  IO initiation was not completed the monitor request will be reissued.  If the
{  wait option was selected, init_new_io in the RB will be set to FALSE and the
{  monitor request reissued.
{
{  Rb_init_new_io and rb_status are input/output parameters and should not be initialized.
{
{----------------------------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$process_wmp_status
    (    wmp_status: mmt$write_modified_pages_status;     { status from mmp$mm_write_modified_pages }
         last_written_pfti: mmt$page_frame_index;     { frame whose io the task may wait on; 0 if none }
         rb_wait: ost$wait;     { caller's wait option }
     VAR rb_init_new_io: boolean;     { input/output: cleared when the request is reissued to wait }
     VAR rb_status: syt$monitor_status);     { input/output: set abnormal on error statuses }

    CASE wmp_status OF
    = mmc$wmp_io_initiation_reject =

{ Initiation was rejected.  If some io was started, queue the task on that page's io and
{ reissue; otherwise just reissue and force a task switch so progress can be made.

      IF last_written_pfti <> 0 THEN
        IF mmv$pft_p^ [last_written_pfti].active_io_count = 0 THEN
          mtp$error_stop ('MM - WMP tried to queue and no IO');
        IFEND;
        tmp$queue_task (mtf$cst_p ()^.taskid, tmc$ts_page_wait, mmv$pft_p^ [last_written_pfti].task_queue);
        tmp$reissue_monitor_request;
      ELSE
        tmp$reissue_monitor_request;
        tmp$cause_task_switch;
      IFEND;

    = mmc$wmp_io_complete =

{ Nothing to do; rb_status keeps its incoming (normal) value.

    = mmc$wmp_io_active =

{ Io is in flight.  With the wait option, queue the task on the last written page, mark the
{ RB so the reissued request only collects status (init_new_io := FALSE), and reissue.

      IF rb_wait = osc$wait THEN
        IF mmv$pft_p^ [last_written_pfti].active_io_count = 0 THEN
          mtp$error_stop ('MM - WMP tried to queue and no IO');
        IFEND;
        tmp$queue_task (mtf$cst_p ()^.taskid, tmc$ts_page_wait, mmv$pft_p^ [last_written_pfti].task_queue);
        rb_init_new_io := FALSE;
        tmp$reissue_monitor_request;
      IFEND;

    = mmc$wmp_volume_unavailable =
      mtp$set_status_abnormal ('MM', mme$volume_unavailable, rb_status);

    = mmc$wmp_io_errors =
      mtp$set_status_abnormal ('MM', mme$io_write_error, rb_status);

    = mmc$wmp_server_terminated =
      mtp$set_status_abnormal ('DF', dfe$server_has_terminated, rb_status);

    CASEND;

  PROCEND mmp$process_wmp_status;

?? TITLE := 'MMP$MM_CONDITIONAL_FREE' ??
?? EJECT ??

{ Purpose:
{   This procedure conditionally frees pages of a segment in the address range of SVA to
{   SVA + LENGTH - 1.  The modified bits are cleared to prevent writing the pages to disk.
{   The pages will remain in the job's working set though in case they are referenced again
{   soon.  Normal aging will move the pages to the free queue when they are no longer being
{   referenced.

  PROCEDURE mmp$mm_conditional_free
    (    sva: ost$system_virtual_address;     { start of the range to conditionally free }
         length: ost$segment_length;     { only pages totally contained in the range are affected }
         aste_p: ^mmt$active_segment_table_entry);     { ASTE of the segment }

    VAR
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      pte_p: ^ost$page_table_entry,
      purge_all_page_maps: boolean;

    purge_all_page_maps := FALSE;

    mmp$initialize_find_next_pfti (sva, length, exclude_partial_pages, psc_all_except_avail, aste_p, pfti);
    WHILE pfti <> 0 DO
      pfte_p := ^mmv$pft_p^ [pfti];
      pte_p := ^mmv$pt_p^ [pfte_p^.pti];

{ An available-modified page with no io active can go straight to the available queue once
{ its modified bit is cleared.

      IF (pfte_p^.queue_id = mmc$pq_avail_modified) AND (pfte_p^.active_io_count = 0) THEN
        pte_p^.m := FALSE;

{ Whenever the modified bit is cleared on a page in the available_modified queue that does not have
{ I/O active, the SOON count must be incremented.  When relink_page_frame takes an UNMODIFIED page out
{ of the available_modified queue, it assumes I/O has been done and will decrement the soon count.

        mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon + 1;
        mmp$relink_page_frame (pfti, mmc$pq_avail);
      ELSEIF pte_p^.v THEN

{ Pages must be removed from the io error queues before the modified bit is cleared.

        IF (pfte_p^.queue_id = mmc$pq_shared_io_error) OR (pfte_p^.queue_id = mmc$pq_job_io_error) THEN
          mmp$relink_page_frame (pfti, pfte_p^.aste_p^.queue_id);
        IFEND;
        pte_p^.m := FALSE;

{ Clear the used bit and reset the aging counters so normal aging will move the page to the
{ free queue if it is not referenced again.

        IF pte_p^.u THEN
          pte_p^.u := FALSE;
          pfte_p^.age := 0;
          pfte_p^.cyclic_age := 0;
        IFEND;
        purge_all_page_maps := TRUE;
      IFEND;
      mmp$find_next_pfti (pfti);
    WHILEND;

{ One purge at the end makes the cleared modified/used bits visible to all processors.

    IF purge_all_page_maps THEN
      mmp$sva_purge_all_page_map (sva);
    IFEND;

  PROCEND mmp$mm_conditional_free;

?? TITLE := 'MMP$FREE_FLUSH' ??
?? EJECT ??
{--------------------------------------------------------------------------------------------------------
{Name:
{  mmp$free_flush
{Purpose:
{  This routine processes the 'FREE' , 'WRITE MODIFIED PAGES', and
{  'CONDITIONAL FREE' requests
{Input:
{  rb - request block
{Output:
{  none
{Error Codes:
{  invalid PVA
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$free_flush
    (VAR rb: mmt$rb_free_flush;     { request block; rb.status returns the completion status }
         cst_p: ^ost$cpu_state_table);     { CPU state table of the requesting processor }

    VAR
      fde_p: gft$locked_file_desc_entry_p,
      wmp_status: mmt$write_modified_pages_status,
      last_written_pfti: mmt$page_frame_index,
      ste_p: ^mmt$segment_descriptor,
      stxe_p: ^mmt$segment_descriptor_extended,
      aste_p: ^mmt$active_segment_table_entry,
      page_count: integer,
      io_id: mmt$io_identifier,
      io_count: mmt$active_io_count,
      io_already_active: boolean,
      sva: ost$system_virtual_address;


    io_id.specified := FALSE;

{ Nothing can be done before memory manager tables exist.

    IF NOT mmv$tables_initialized THEN
      RETURN {----->
    IFEND;
    rb.status.normal := TRUE;


{Free pages.
{ Rings above 3 must have write access to the PVA; rings 1-3 may have either read or write.

    IF cst_p^.xcb_p^.xp.p_register.pva.ring > 3 THEN
      mmp$verify_pva (^rb.pva, mmc$sat_write, rb.status);
    ELSE
      mmp$verify_pva (^rb.pva, mmc$sat_read_or_write, rb.status);
    IFEND;
    IF rb.status.normal THEN
      mmp$convert_pva (rb.pva, cst_p, sva, fde_p, aste_p, ste_p, stxe_p);
      CASE rb.reqcode OF
      = syc$rc_write_modified_pages =
        IF (aste_p^.queue_id <> mmc$pq_job_working_set) AND
              ((aste_p^.queue_id < mmc$pq_shared_first) OR (aste_p^.queue_id > mmc$pq_shared_last)) THEN
          {Queue_id is neither JWS nor a shared queue}
          mtp$set_status_abnormal ('MM', mme$segment_not_pageable, rb.status);
        ELSEIF (fde_p^.media = gfc$fm_transient_segment) OR
              (stxe_p^.assign_active <> mmc$assign_active_null) THEN
          mtp$set_status_abnormal ('MM', mme$segment_not_assigned_device, rb.status);
        ELSE
          mmp$mm_write_modified_pages (sva, rb.length, fde_p, aste_p, ioc$write_page, rb.init_new_io, FALSE,
                io_id, io_count, io_already_active, last_written_pfti, wmp_status);
          mmp$process_wmp_status (wmp_status, last_written_pfti, rb.waitopt, rb.init_new_io, rb.status);
        IFEND;
      = syc$rc_free_pages =
        mmp$mm_free_pages (sva, rb.length, aste_p, FALSE, page_count);
      = syc$rc_conditional_free =
        mmp$mm_conditional_free (sva, rb.length, aste_p);
      ELSE
        mtp$set_status_abnormal ('MM', mme$invalid_request, rb.status);
      CASEND;
    IFEND;

  PROCEND mmp$free_flush;
?? TITLE := 'MMP$MOVE_MODIFIED_SERVER_PAGE', EJECT ??
{-------------------------------------------------------------------------------------------------------------
{ NAME:
{   MMP$MOVE_MODIFIED_SERVER_PAGE
{ PURPOSE:
{   This monitor request will move a page frame from a source file , specified by a system_file_id, to a
{ destination file.  It is used to update the server image file on the client in the case of a server crash.
{ When the request completes, the page in the range from <rb.byte_offset> to <rb.byte_offset>+ <osv$page_size>
{ will have been moved to the range of addresses specified by <rb.destination_pva> to <rb.destination_pva>+
{ <osv$page_size>.  This procedure assumes that <rb.destination_pva> is on a page boundary.  The procedure
{ executes in monitor mode on behalf of a system task which writes the server image file with pages from all
{ of the currently attached server files.
{
{ CAUTION:  Be sure to fully understand how the 'move' is accomplished before changing this
{           procedure.  Because mmp$delete_pt_entry USES information and mmp$make_pt_entry
{           CHANGES information in the page frame table entry, it is necessary to 'move' the
{           page in the following order:
{             1.  Delete the source page table entry.
{             2.  Change the page frame table entry to reflect destination page information.
{             3.  Make the page table entry for the destination page.
{             4.  If necessary, relink the page frame to the queue for the destination segment.
{             5.  Set the valid bit on the destination page.
{
{ PROCEDURE [XDCL] mmp$move_modified_server_page
{   (VAR rb: mmt$rb_ring1_server_seg_request;
{        cst_p: ^ost$cpu_state_table);
{
{   RB: (INPUT/OUTPUT) Specifies the request block containing the information
{       which will be used by the procedure to move (if possible) a single page
{       to the destination (server image) file.
{         RB.SFID: Specifies the System File ID of the file whose pages must be
{            moved to the destination file
{         RB.GLOBAL_FILE_NAME: Specifies the global file name of the file whose
{            pages must be moved to the destination file
{         RB.DESTINATION_PVA: Specifies the location within the server image
{            file to which a located modified page will be written
{         RB.BYTE_OFFSET: Specifies the beginning offset of the located page
{            which has been moved.  This is an output value.
{         RB.STATUS: Specifies the completion status of the request.  Conditions
{            which can be returned are:
{              mme$io_active_on_move_page
{              mme$no_pages_found_for_move
{              mme$page_table_full
{   CST_P: (INPUT) Specifies the pointer to the CPU state table which is
{       executing this process.
{-------------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$move_modified_server_page
    (VAR rb: mmt$rb_ring1_server_seg_request;
         cst_p: ^ost$cpu_state_table);

?? NEWTITLE := '  DETERMINE_MOVE_PAGE_STATUS', EJECT ??

{ Decides whether page frame PFTI may be moved to the server image file.
{ MOVE_PAGE is returned TRUE only for a modified page that carries no data-invalidating
{ page lock and whose segment is not locked for write.  LOCK_ENCOUNTERED is set (never
{ cleared) when a lock prevented the move, so the caller knows a cache purge is needed.

    PROCEDURE [INLINE] determine_move_page_status
      (    pfti: mmt$page_frame_index;
           fde_p: gft$locked_file_desc_entry_p;
       VAR move_page: boolean;
       VAR lock_encountered: boolean);

      VAR
        pft_entry_p: ^mmt$page_frame_table_entry;

      move_page := FALSE;
      pft_entry_p := ^mmv$pft_p^ [pfti];

{ Only modified pages are candidates for the move.

      IF NOT mmv$pt_p^ [pft_entry_p^.pti].m THEN
        RETURN; {----->
      IFEND;

{ The page is not valid for the move to the image if the page lock indicates the page doesn't contain data
{ (page is being read from disk or server allocation is occurring) or the user has locked the page to prevent
{ IO.

      IF (pft_entry_p^.locked_page = mmc$lp_aging_lock) OR
            (pft_entry_p^.locked_page = mmc$lp_server_allocate_lock) OR
            (pft_entry_p^.locked_page = mmc$lp_page_in_lock) THEN
        lock_encountered := TRUE;
        RETURN; {----->
      IFEND;

{ The page is not valid for the move to the image if the segment is locked (MMP$LOCK_SEGMENT) UNLESS the page
{ is still being written from a previous MMP$UNLOCK_SEGMENT with write_protection.

      IF fde_p^.segment_lock.locked_for_write AND (pft_entry_p^.locked_page <> mmc$lp_write_protected_lock)
            THEN
        lock_encountered := TRUE;
        RETURN; {----->
      IFEND;

      move_page := TRUE;

    PROCEND determine_move_page_status;
?? OLDTITLE, EJECT ??

    VAR
      asid: ost$asid,
      aste_p: ^mmt$active_segment_table_entry,
      asti: mmt$ast_index,
      count: 1 .. 32,
      destination_aste_p: ^mmt$active_segment_table_entry,
      destination_pfte_p: ^mmt$page_frame_table_entry,
      destination_pfti: mmt$page_frame_index,
      destination_pti: integer,
      destination_ste_p: ^mmt$segment_descriptor,
      destination_stxe_p: ^mmt$segment_descriptor_extended,
      destination_sva: ost$system_virtual_address,
      fde_p: gft$locked_file_desc_entry_p,
      found: boolean,
      lock_encountered: boolean,
      move_page: boolean,
      mpt_status: mmt$make_pt_entry_status,
      pfti: mmt$page_frame_index,
      save_valid: boolean,
      source_aste_p: ^mmt$active_segment_table_entry,
      source_sva: ost$system_virtual_address;


    rb.status.normal := TRUE;

{ Set up variables for the search through the ASTE for pages associated with the RB.SFID.

    gfp$mtr_get_locked_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
    mmp$get_verify_asti_in_fde (fde_p, rb.sfid, cst_p^.ijl_ordinal, asti);
    IF asti = 0 THEN
      mtp$set_status_abnormal ('MM', mme$no_pages_found_for_move, rb.status);
      RETURN; {----->
    IFEND;

    mmp$asid (asti, asid);
    source_sva.asid := asid;

{ The destination (server image) PVA must be writable by the caller.

    mmp$verify_pva (^rb.destination_pva, mmc$sat_write, rb.status);
    IF NOT rb.status.normal THEN
      RETURN; {----->
    IFEND;

    mmp$convert_pva (rb.destination_pva, cst_p, destination_sva, fde_p, destination_aste_p, destination_ste_p,
          destination_stxe_p);

    source_aste_p := ^mmv$ast_p^ [asti];

{ Start looking through the aste_p^.pft_link for modified pages.  Free those pages which are not modified
{ and have no IO active on them.

    pfti := source_aste_p^.pft_link.fwd;
    lock_encountered := FALSE;

{ Each candidate page is made invalid (with the old valid bit saved) and the maps purged
{ before it is inspected, so no CPU can touch it while it is examined or freed.

  /locate_a_modified_page/
    WHILE (pfti <> 0) AND (mmv$pft_p^ [pfti].active_io_count = 0) DO
      save_valid := mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v;
      mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := FALSE;
      mmp$purge_all_map_proc;
      determine_move_page_status (pfti, fde_p, move_page, lock_encountered);
      IF NOT move_page THEN
        IF dfv$file_server_debug_enabled AND lock_encountered THEN
          display_integer_monitor (' Lock encountered ', pfti);
        IFEND;
        mmp$delete_pt_entry (pfti, TRUE);
        mmp$relink_page_frame (pfti, mmc$pq_free);
        pfti := source_aste_p^.pft_link.fwd;
      ELSE
        mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := save_valid;
        EXIT /locate_a_modified_page/; {----->
      IFEND;
    WHILEND /locate_a_modified_page/;

{ No movable page found, or the first remaining page still has io active: report and purge
{ before returning.

    IF pfti = 0 THEN
      mtp$set_status_abnormal ('MM', mme$no_pages_found_for_move, rb.status);
      mmp$purge_all_map_proc;
      IF lock_encountered THEN
        mmp$purge_all_cache_proc;
      IFEND;
      RETURN; {----->
    ELSEIF (mmv$pft_p^ [pfti].active_io_count > 0) THEN
      mtp$set_status_abnormal ('MM', mme$io_active_on_move_page, rb.status);
      mmp$purge_all_map_proc;
      IF lock_encountered THEN
        mmp$purge_all_cache_proc;
      IFEND;
      RETURN; {----->
    IFEND;

{ We have located a modified page of the file which now can be moved.
{ Delete the source page, but save the valid bit in case we need to restore the page table.
{ (The valid bit was saved as part of the operation to locate a page above.)

    source_sva.offset := mmv$pft_p^ [pfti].sva.offset;
    mmp$delete_pt_entry (pfti, TRUE);
    mmp$purge_all_cache_map_proc;

{ The destination page should not be in the page table; if it is, delete it.

    #HASH_SVA (destination_sva, destination_pti, count, found);
    IF found THEN
      destination_pfti := (mmv$pt_p^ [destination_pti].rma * 512) DIV osv$page_size;
      mmp$delete_pt_entry (destination_pfti, TRUE);
      mmp$relink_page_frame (destination_pfti, mmc$pq_free);
    IFEND;

{ Change the page frame table entry to the destination page information.

    destination_pfte_p := ^mmv$pft_p^ [pfti];
    destination_pfte_p^.aste_p := destination_aste_p;
    destination_pfte_p^.sva := destination_sva;

{ Make the page table entry for the destination page.  If the page table is full, replace the source page to
{ the page table and return an abnormal status; job mode will reissue the request.

    mmp$make_pt_entry (destination_sva, pfti, destination_aste_p, destination_pfte_p, mpt_status);
    IF mpt_status = mmc$mpt_page_table_full THEN

{ Record the full condition for the asynchronous memory manager and force it to run soon.

      mmv$async_work.pt_full_aste_p := destination_aste_p;
      mmv$async_work.pt_full_sva := destination_sva;
      mmv$async_work.pt_full := TRUE;
      mmv$time_to_call_mem_mgr := 0;
      osv$time_to_check_asyn := 0;

{ Undo the move: restore the frame to the source segment and remake the source entry.

      destination_pfte_p^.aste_p := source_aste_p;
      destination_pfte_p^.sva := source_sva;
      mmp$make_pt_entry (source_sva, pfti, source_aste_p, destination_pfte_p, mpt_status);
      IF mpt_status <> mmc$mpt_done THEN
        mtp$error_stop ('MOVE_MODIFIED_SERVER_PAGE -- COULD NOT REMAKE PAGE TABLE ENTRY');
      IFEND;
      mmv$pt_p^ [destination_pfte_p^.pti].m := TRUE;
      mmv$pt_p^ [destination_pfte_p^.pti].v := save_valid;
      mtp$set_status_abnormal ('MM', mme$page_table_full, rb.status);
      IF lock_encountered THEN
        mmp$purge_all_cache_proc;
      IFEND;
      RETURN; {----->
    IFEND;

{ Relink the page into the destination's queue.

    mmv$pt_p^ [destination_pfte_p^.pti].v := TRUE;
    mmv$pt_p^ [destination_pfte_p^.pti].m := TRUE;

{ The intermediate relink through the wired queue lets the ijl ordinal be changed before the
{ frame is linked onto the destination segment's queue.

    mmp$relink_page_frame (pfti, mmc$pq_wired);
    mmv$pft_p^ [pfti].ijl_ordinal := cst_p^.ijl_ordinal;
    mmp$relink_page_frame (pfti, destination_aste_p^.queue_id);

    IF lock_encountered THEN
      mmp$purge_all_cache_proc;
    IFEND;

{ Tell job mode which page was moved.

    rb.byte_offset := source_sva.offset;

  PROCEND mmp$move_modified_server_page;
?? TITLE := 'MMP$MTR_R1_SERVER_SEG_REQUEST', EJECT ??
{--------------------------------------------------------------------------------------------------------
{ NAME:
{   MMP$MTR_R1_SERVER_SEG_REQUEST
{ PURPOSE:
{   This procedure processes some ring 1 requests for server segments.
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$mtr_r1_server_seg_request
    (VAR rb: mmt$rb_ring1_server_seg_request;     { request block; rb.request selects the operation }
         cst_p: ^ost$cpu_state_table);     { CPU state table of the requesting processor }

    VAR
      able: boolean,
      asid: ost$asid,
      aste_p: ^mmt$active_segment_table_entry,
      asti: mmt$ast_index,
      fde_p: gft$locked_file_desc_entry_p,
      ignore_relink_status: mmt$relink_page_status,
      ijle_p: ^jmt$initiated_job_list_entry,
      inhibit_io: boolean,
      mcount: integer,
      pfti: mmt$page_frame_index,
      rcount: integer,
      save_v: boolean,
      xsva: ost$system_virtual_address;


    CASE rb.request OF
    = mmc$ssr1_flush_delete_seg_sfid, mmc$ssr1_free_delete_seg_sfid =
      rb.pages_not_deleted := 0;
{ Do nothing. This is normal.
      ;
    = mmc$ssr1_move_modified_df_page =
      mmp$move_modified_server_page (rb, cst_p);
      RETURN; {----->
    ELSE
      mtp$error_stop ('MM - Unknown request - mmp$mtr_r1_server_seg_request');
    CASEND;

{ Note: job mode has the FDE locked when making this request. This is required to
{ keep the FDE from being deleted by another job while this request is being processed.

    gfp$mtr_get_locked_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
    mmp$get_verify_asti_in_fde (fde_p, rb.sfid, cst_p^.ijl_ordinal, asti);
    IF (asti = 0) OR (fde_p^.attach_count = 0) THEN
      RETURN; {----->
    IFEND;

{ Walk every page of the segment (7fffffff(16) acts as a maximal length).

    mmp$asid (asti, asid);
    xsva.asid := asid;
    xsva.offset := 0;
    aste_p := ^mmv$ast_p^ [asti];
    mmp$initialize_find_next_pfti (xsva, 7fffffff(16), include_partial_pages, psc_all, aste_p, pfti);

    WHILE pfti <> 0 DO
      IF (mmv$pft_p^ [pfti].aste_p <> NIL) AND (mmv$pft_p^ [pfti].aste_p^.sfid = rb.sfid) THEN

{ get_inhibit_io_status locks the owning job's AJL; it is unlocked below after the page is
{ processed.

        mmp$get_inhibit_io_status (mmv$pft_p^ [pfti].ijl_ordinal, TRUE {lock ajl} , inhibit_io, ijle_p);
        IF NOT inhibit_io THEN

{ Invalidate the page (saving the old valid bit) and purge so no CPU touches it while it is
{ examined.

          save_v := mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v;
          mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := FALSE;
          mmp$sva_purge_all_page_map (mmv$pft_p^ [pfti].sva);
          IF (NOT mmv$pt_p^ [mmv$pft_p^ [pfti].pti].m) AND (mmv$pft_p^ [pfti].locked_page =
                mmc$lp_not_locked) AND (mmv$pft_p^ [pfti].active_io_count = 0) THEN

{ Unmodified, unlocked, no io: the frame can be freed outright.

            mmp$delete_pt_entry (pfti, TRUE);
            mmp$relink_page_frame (pfti, mmc$pq_free);
          ELSEIF mmv$pt_p^ [mmv$pft_p^ [pfti].pti].m AND (mmv$pft_p^ [pfti].locked_page =
                mmc$lp_not_locked) AND (mmv$pft_p^ [pfti].queue_id > mmc$pq_first_valid_in_pt) THEN
            IF rb.request = mmc$ssr1_flush_delete_seg_sfid THEN

{             MMP$REMOVE_PAGE_FROM_JWS does not necessarily write the page to disk.  If it doesn't we may
{             have a problem in the future when we really want the page to go out to disk.

              mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := save_v;
              mmp$remove_page_from_jws (pfti, cst_p^.ijle_p, FALSE {= relink when Avail Mod Q max} , mcount,
                    rcount, ignore_relink_status);
              rb.pages_not_deleted := rb.pages_not_deleted + 1;
            ELSE {rb.request = mmc$ssr1_free_delete_seg_sfid }
              mmp$delete_pt_entry (pfti, TRUE);
              mmp$relink_page_frame (pfti, mmc$pq_free);
            IFEND;
          ELSEIF rb.request = mmc$ssr1_flush_delete_seg_sfid THEN

{ Page is locked or has io active: restore the valid bit and count it as not deleted.

            mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := save_v;
            rb.pages_not_deleted := rb.pages_not_deleted + 1;
          IFEND;
          jmp$unlock_ajl (ijle_p);
        ELSE { inhibit_io }
          IF rb.request = mmc$ssr1_flush_delete_seg_sfid THEN
            rb.pages_not_deleted := rb.pages_not_deleted + 1;
          IFEND;
        IFEND;
      IFEND;
      mmp$find_next_pfti (pfti);
    WHILEND;

{ If the segment now has no pages in memory, release its ASID.  When the owning job's ijl
{ entry is still valid the asid change is routed through mmp$change_asid; otherwise the FDE's
{ asti is simply cleared.

    IF aste_p <> NIL THEN
      IF aste_p^.pages_in_memory = 0 THEN
        IF jmp$ijl_block_valid (aste_p^.ijl_ordinal) AND (jmv$ijl_p.
              block_p^ [aste_p^.ijl_ordinal.block_number].index_p^ [aste_p^.ijl_ordinal.block_index].
              entry_status <> jmc$ies_entry_free) THEN
          mmp$change_asid (aste_p, asid, 0, 0);
        ELSE
          fde_p^.asti := 0;
        IFEND;
        mmp$free_asid (asid, aste_p);
      IFEND;
    IFEND;

  PROCEND mmp$mtr_r1_server_seg_request;
?? TITLE := 'MMP$MTR_RING1_SEGMENT_REQUEST' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{name:
{  mmp$mtr_ring1_segment_request
{purpose:
{ This procedure processes some ring 1 requests for segments.
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$mtr_ring1_segment_request
    (VAR rb: mmt$rb_ring1_segment_request;
         cst_p: ^ost$cpu_state_table);

    VAR
      asid: ost$asid,
      asid_can_be_deleted: boolean,
      aste_p: ^mmt$active_segment_table_entry,
      asti: mmt$ast_index,
      fde_p: gft$locked_file_desc_entry_p,
      first_image_pfti: 0 .. 0ffffffff(16),
      ijle_p: ^jmt$initiated_job_list_entry,
      io_already_active: boolean,
      io_count: mmt$active_io_count,
      io_id: mmt$io_identifier,
      j: integer,
      last_written_pfti: mmt$page_frame_index,
      old_sfid: gft$system_file_identifier,
      page_count: integer,
      pfti: mmt$page_frame_index,
      sdte_p: ^mmt$segment_descriptor,
      sdtxe_p: ^mmt$segment_descriptor_extended,
      sva: ost$system_virtual_address,
      wmp_status: mmt$write_modified_pages_status;

    VAR
      v$nowait_wait: [READ, STATIC] array [boolean] of ost$wait := [osc$nowait, osc$wait],
      v$unrecovered_files: [STATIC] integer := 0,
      v$unrecovered_pages: [STATIC] integer := 0;

{ No memory manager I/O identifier is associated with this request yet.

    io_id.specified := FALSE;

{ Count this request kind in the ring 1 request trace table.

    mmv$ring1_request_trace [$INTEGER (rb.request)] := mmv$ring1_request_trace [$INTEGER (rb.request)] + 1;

    rb.status.normal := TRUE;
    sva.asid := 0;

{ Each arm of the CASE either handles the request completely and returns, or
{ locates the active segment table entry (ASTE_P, ASID) for the file and
{ falls through to the common flush/delete processing following the CASEND.

    CASE rb.request OF
    = mmc$sr1_detach_file =
      gfp$mtr_get_locked_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
      mmp$get_verify_asti_in_fde (fde_p, rb.sfid, cst_p^.ijl_ordinal, asti);
      IF asti <> 0 THEN
        aste_p := ^mmv$ast_p^ [asti];
        IF (fde_p^.queue_status = gfc$qs_job_shared) AND (aste_p^.queue_id <= mmc$pq_shared_last) THEN

{ The file is job-shared so there can be no modified pages; the pages are already in the shared queue so
{ they do not need to be removed from a job working set.  Therefore, nothing needs to be done.

          RETURN; {----->
        IFEND;
        mmp$asid (asti, asid);
        sva.asid := asid;
      ELSE
        RETURN; {----->
      IFEND;

    = mmc$sr1_delete_seg_sfid, mmc$sr1_flush_delete_seg_sfid =
      gfp$mtr_get_locked_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
      mmp$get_verify_asti_in_fde (fde_p, rb.sfid, cst_p^.ijl_ordinal, asti);
      IF asti <> 0 THEN
        mmp$asid (asti, asid);
        sva.asid := asid;
        aste_p := ^mmv$ast_p^ [asti];
      ELSE
        RETURN; {----->
      IFEND;

    = mmc$sr1_delete_job_seg_by_sfid =

{ Only proceed when the segment's ast entry is owned by the calling job.

      gfp$mtr_get_locked_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
      mmp$get_verify_asti_in_fde (fde_p, rb.sfid, cst_p^.ijl_ordinal, asti);
      IF (asti = 0) OR (mmv$ast_p^ [asti].ijl_ordinal <> cst_p^.ijl_ordinal) THEN
        RETURN; {----->
      IFEND;
      mmp$asid (asti, asid);
      sva.asid := asid;
      aste_p := ^mmv$ast_p^ [asti];

    = mmc$sr1_flush_avail_modified =

{ If the file is active, replenish the free queues for its asid.  This
{ request is complete here; it never reaches the common processing.

      gfp$mtr_get_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
      mmp$get_verify_asti_in_fde (fde_p, rb.sfid, cst_p^.ijl_ordinal, asti);
      IF asti <> 0 THEN
        mmp$asid (asti, asid);
        mmp$replenish_free_queues (asid);
      IFEND;
      RETURN; {----->

    = mmc$sr1_get_highest_offset =
      rb.highest_offset := 0;
      gfp$mtr_get_locked_fde_p (rb.file_sfid, cst_p^.ijle_p, fde_p);
      mmp$get_verify_asti_in_fde (fde_p, rb.file_sfid, cst_p^.ijl_ordinal, asti);
      IF asti = 0 THEN
        RETURN; {----->
      IFEND;
      aste_p := ^mmv$ast_p^ [asti];
      pfti := aste_p^.pft_link.fwd;

{ Scan the segment's chain of page frames for the highest page offset that
{ is currently in memory.

    /locate_highest_offset/
      WHILE pfti <> 0 DO
        IF mmv$pft_p^ [pfti].sva.offset > rb.highest_offset THEN
          rb.highest_offset := mmv$pft_p^ [pfti].sva.offset;
        IFEND;
        pfti := mmv$pft_p^ [pfti].segment_link.fwd;
      WHILEND /locate_highest_offset/;

      RETURN; {----->

    = mmc$sr1_delete_seg_segnum, mmc$sr1_flush_seg_segnum =

{ The file is identified by segment number; get its sfid from the caller's
{ extended segment descriptor table.

      sdtxe_p := mmp$get_sdtx_entry_p (cst_p^.xcb_p, rb.segnum);
      gfp$mtr_get_locked_fde_p (sdtxe_p^.sfid, cst_p^.ijle_p, fde_p);
      mmp$get_verify_asti_in_fde (fde_p, sdtxe_p^.sfid, cst_p^.ijl_ordinal, asti);
      IF asti <> 0 THEN
        mmp$asid (asti, asid);
        sva.asid := asid;
        aste_p := ^mmv$ast_p^ [asti];
      ELSE
        RETURN; {----->
      IFEND;

    = mmc$sr1_commit_memory =

{ Page frames above the deadstart image that are still in the free queue are
{ relinked to the page queues and counted as soon-to-be-reassignable; the
{ deadstart image file is then deactivated.

      first_image_pfti := osv$180_memory_limits.deadstart_upper DIV osv$page_size;
      FOR pfti := first_image_pfti TO UPPERBOUND (mmv$pft_p^) DO
        IF mmv$pft_p^ [pfti].queue_id = mmc$pq_free THEN
          mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon + 1;
          mmp$link_page_frame_to_queue (pfti, ^mmv$pft_p^ [pfti]);
        IFEND;
      FOREND;
      mmv$image_file.active := FALSE;
      RETURN; {----->

    = mmc$sr1_free_image_pages =
      mmp$free_image_pages_mtr;
      RETURN; {----->

    = mmc$sr1_replace_sfid =

{ The SFID of a permanent file which is awaiting recovery is updated in the ast entry with the new SFID.
{ (The new SFID residence is also gfc$tr_system_wait_recovery.)
{ If the ast index passed in is not correct, the AST must be searched.
{ IJL ordinals do NOT have to be checked because these can only be perm files.

      aste_p := NIL;
      IF (rb.asti <> 0) AND (rb.asti <= UPPERBOUND (mmv$ast_p^)) AND mmv$ast_p^ [rb.asti].in_use THEN
        IF mmv$ast_p^ [rb.asti].sfid = rb.new_sfid THEN {sfid already replaced
          RETURN; {----->
        ELSEIF mmv$ast_p^ [rb.asti].sfid = rb.old_sfid THEN
          aste_p := ^mmv$ast_p^ [rb.asti];
        IFEND;
      IFEND;

      IF aste_p = NIL THEN
        old_sfid := rb.old_sfid;

      /ast_search/
        FOR asti := LOWERBOUND (mmv$ast_p^) TO UPPERBOUND (mmv$ast_p^) DO
          IF mmv$ast_p^ [asti].in_use AND (mmv$ast_p^ [asti].sfid = old_sfid) THEN
            rb.asti := asti;
            aste_p := ^mmv$ast_p^ [asti];
            EXIT /ast_search/; {----->
          IFEND;
        FOREND /ast_search/;
      IFEND;

      IF aste_p = NIL THEN
        RETURN; {----->
      IFEND;

      aste_p^.sfid := rb.new_sfid;
      gfp$mtr_get_locked_fde_p (rb.new_sfid, cst_p^.ijle_p, fde_p);
      fde_p^.asti := rb.asti;
      RETURN; {----->

    = mmc$sr1_end_job_recovery =

{ Every page frame still owned by a file awaiting recovery is deleted from
{ the page table and freed.  The counts of unrecovered files and pages are
{ accumulated across calls in the static variables and returned to the
{ caller.

      FOR pfti := LOWERBOUND (mmv$pft_p^) TO UPPERBOUND (mmv$pft_p^) DO
        IF (mmv$pft_p^ [pfti].aste_p <> NIL) AND (mmv$pft_p^ [pfti].aste_p^.sfid.residence =
              gfc$tr_system_wait_recovery) AND (mmv$pft_p^ [pfti].queue_id <> mmc$pq_free) THEN
          asid := mmv$pft_p^ [pfti].sva.asid;
          aste_p := mmv$pft_p^ [pfti].aste_p;
          ijle_p := jmf$ijle_p (mmv$pft_p^ [pfti].ijl_ordinal);
          IF (ijle_p^.swap_status >= jmc$iss_initiate_swapout_io) AND
                (ijle_p^.swap_status <= jmc$iss_swapout_io_complete) THEN

{ The owning job is in the middle of swapout io; reissue this monitor
{ request and switch tasks so the swapout can complete first.

            tmp$reissue_monitor_request;
            tmp$cause_task_switch;
          IFEND;
          mmp$delete_pt_entry (pfti, TRUE);
          mmp$relink_page_frame (pfti, mmc$pq_free);
          IF aste_p^.pages_in_memory = 0 THEN
            mmp$free_asid (asid, aste_p);
            v$unrecovered_files := v$unrecovered_files + 1;
          IFEND;
          v$unrecovered_pages := v$unrecovered_pages + 1;
          IF ((ijle_p^.swap_status >= jmc$iss_job_idle_tasks_complete) AND
                (ijle_p^.swap_status <= jmc$iss_swapped_io_cannot_init)) OR
                (ijle_p^.swap_status = jmc$iss_swapped_io_complete) THEN
            jsp$recalculate_swapped_pages (ijle_p, 1);
          IFEND;
        IFEND;
      FOREND;

      rb.unrecovered_files := v$unrecovered_files;
      rb.unrecovered_pages := v$unrecovered_pages;
      RETURN; {----->

    = mmc$sr1_make_mfw_cache =

{ Make the mainframe wired heap segment a regular segment in both segment
{ tables, then purge the maps and buffers so the change takes effect.

      mtv$monitor_segment_table.st [#SEGMENT (osv$mainframe_wired_heap)].ste.vl := osc$vl_regular_segment;
      mtv$nos_segment_table_p^.st [#SEGMENT (osv$mainframe_wired_heap)].ste.vl := osc$vl_regular_segment;
      #PURGE_BUFFER (osc$purge_all_page_seg_map, sva);
      RETURN; {----->

    = mmc$sr1_remove_job_shared_pages =
      gfp$mtr_get_locked_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
      IF fde_p^.asti <> 0 THEN
        mmp$remove_jws_to_shared_pages (fde_p, cst_p, rb);
      IFEND;
      RETURN; {----->

    = mmc$sr1_remove_detached_pages =

{ Remove detached pages only when the segment is in the caller's job
{ working set.

      gfp$mtr_get_locked_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
      mmp$get_verify_asti_in_fde (fde_p, rb.sfid, cst_p^.ijl_ordinal, asti);
      IF (asti <> 0) AND (mmv$ast_p^ [asti].queue_id = mmc$pq_job_working_set) AND
            (mmv$ast_p^ [asti].ijl_ordinal = cst_p^.ijl_ordinal) THEN
        mmp$asid (asti, asid);
        sva.asid := asid;
        sva.offset := 0;
        aste_p := ^mmv$ast_p^ [asti];
        mmp$remove_detached_pages (sva, aste_p, cst_p^.ijl_ordinal);
      IFEND;
      RETURN; {----->

    = mmc$sr1_change_swap_file_queue =

{ Move the swap file to the system job and the shared non-execute queue, and
{ mark the file globally shared.

      gfp$mtr_get_locked_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
      mmp$get_verify_asti_in_fde (fde_p, rb.sfid, cst_p^.ijl_ordinal, asti);
      IF asti <> 0 THEN
        mmv$ast_p^ [asti].ijl_ordinal := jmv$system_ijl_ordinal;
        mmv$ast_p^ [asti].queue_id := mmc$pq_shared_pf_non_execute;
      IFEND;
      fde_p^.queue_status := gfc$qs_global_shared;
      RETURN; {----->

    = mmc$sr1_share_global_logs =
      gfp$mtr_get_locked_fde_p (rb.sfid, cst_p^.ijle_p, fde_p);
      fde_p^.queue_status := gfc$qs_global_shared;
      IF fde_p^.asti <> 0 THEN
        mmp$remove_jws_to_shared_pages (fde_p, cst_p, rb);
      IFEND;
      RETURN; {----->

    ELSE
      mtp$set_status_abnormal ('MM', mme$invalid_request, rb.status);
      RETURN; {----->
    CASEND;

{ Common flush/delete processing.  The detach and flush-by-segment-number
{ requests must leave the asid intact; all other fall-through requests may
{ delete it (subject to the template file check below).

    sva.offset := 0;
    asid_can_be_deleted := NOT ((rb.request = mmc$sr1_detach_file) OR
          (rb.request = mmc$sr1_flush_seg_segnum));
    IF asid_can_be_deleted AND fde_p^.flags.global_template_file THEN

{!!!!!!This check is required in order to run multiple job templates. It may be worth
{ some time to look into the file kinds of multiple job template segments. There appear to
{ some strange uses of file kind.

      IF NOT syv$user_templates THEN
        mtp$error_stop ('MM - tried to delete template segment');
      IFEND;
    IFEND;

{ For detach and flush requests, write all modified pages of the segment to
{ mass storage.  The asid cannot be deleted while the write io is active and
{ not waited for, or when the write did not complete normally.

    IF (rb.request = mmc$sr1_detach_file) OR (rb.request = mmc$sr1_flush_delete_seg_sfid) OR
          (rb.request = mmc$sr1_flush_seg_segnum) THEN
      mmp$mm_write_modified_pages (sva, 7ffffff0(16), fde_p, aste_p, ioc$write_page, rb.init_new_io,
            (rb.request = mmc$sr1_detach_file), io_id, io_count, io_already_active, last_written_pfti,
            wmp_status);
      mmp$process_wmp_status (wmp_status, last_written_pfti, v$nowait_wait [rb.wait_for_io_complete],
            rb.init_new_io, rb.status);
      IF ((wmp_status <> mmc$wmp_io_complete) AND (wmp_status <> mmc$wmp_io_active)) OR
            ((wmp_status = mmc$wmp_io_active) AND rb.wait_for_io_complete) THEN
        asid_can_be_deleted := FALSE;
      IFEND;
    IFEND;

{ Disconnect the file from the ast and free all of the segment's pages.

    IF asid_can_be_deleted THEN
      fde_p^.asti := 0;
      mmp$mm_free_pages (sva, 7fffffff(16), aste_p, TRUE, page_count);
    IFEND;

  PROCEND mmp$mtr_ring1_segment_request;

?? TITLE := 'MMP$FETCH_STACK_SEGMENT_INFO' ??
?? EJECT ??
{-------------------------------------------------------------
*copyc mmh$fetch_stack_segment_info
{-------------------------------------------------------------


  PROCEDURE [XDCL] mmp$fetch_stack_segment_info
    (    xcb_p: ^ost$execution_control_block;
         ring: ost$valid_ring;
         set_length_to_zero: boolean;
     VAR stack_segment_number: ost$segment;
     VAR maximum_segment_length: ost$segment_length;
     VAR found: boolean);

    VAR
      fde_p: gft$locked_file_desc_entry_p,
      ijl_ordinal: jmt$ijl_ordinal,
      ijle_p: ^jmt$initiated_job_list_entry,
      sdt_p: mmt$max_sdt_p,
      sdtx_p: mmt$max_sdtx_p,
      segnum: ost$segment;

{ Locate the stack segment for the requested ring in the task's segment
{ tables.  When a segment with the stack attribute and the matching ring is
{ found, return its segment number and the file limit of its backing file;
{ optionally reset the file's end-of-information to zero.  FOUND is FALSE
{ when no such segment exists.
{ (Unused locals LIMIT and STATUS removed.)

    tmp$obtain_ijl_ordinal_from_ptl (xcb_p^.global_task_id, ijl_ordinal);
    ijle_p := jmf$ijle_p (ijl_ordinal);
    mmp$get_max_sdt_sdtx_pointer (xcb_p, sdt_p, sdtx_p);

    FOR segnum := 0 TO xcb_p^.xp.segment_table_length DO
      IF (mmc$sa_stack IN sdtx_p^.sdtx_table [segnum].software_attribute_set) AND
            (sdt_p^.st [segnum].ste.r1 = ring) THEN
        found := TRUE;
        stack_segment_number := segnum;
        gfp$mtr_get_locked_fde_p (sdtx_p^.sdtx_table [segnum].sfid, ijle_p, fde_p);
        maximum_segment_length := fde_p^.file_limit;
        IF set_length_to_zero THEN

{ Truncate the backing file: reset end-of-information and mark it modified.

          fde_p^.eoi_byte_address := 0;
          fde_p^.eoi_state := mmc$eoi_actual;
          fde_p^.flags.eoi_modified := TRUE;
        IFEND;
        RETURN; {----->
      IFEND;
    FOREND;

    found := FALSE;

  PROCEND mmp$fetch_stack_segment_info;
?? TITLE := 'MMP$PERIODIC_CALL' ??
?? EJECT ??
{------------------------------------------------------------------
{This procedure is called periodically to age the Shared Page Queues.
{
{   MMP$PERIODIC_CALL
{
{-----------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$periodic_call;

    CONST
      c$relink_page_reject_count_max = 100; { This value is just an initial attempt ...

    VAR
      fde_p: gft$locked_file_desc_entry_p,
      aste_p: ^mmt$active_segment_table_entry,
      jcb_p: ^jmt$job_control_block,
      ajlo: jmt$ajl_ordinal,
      inhibit_io: boolean,
      clock: ost$free_running_clock,
      cptime: integer,
      last_pfti: mmt$page_frame_index,
      new_asid: ost$asid,
      new_asti: mmt$ast_index,
      new_aste_p: ^mmt$active_segment_table_entry,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      ijle_p: ^jmt$initiated_job_list_entry,
      pt_full_status: mmt$pt_full_status,
      taskid: ost$global_task_id,
      mcount: integer,
      mcount1: integer,
      rcount: integer,
      rcount1: integer,
      scount: integer,
      scount1: integer,
      relink_page_status: mmt$relink_page_status,
      relink_page_reject_count: integer,
      relink_status: mmt$relink_page_status,
      aggressive_aging: boolean,
      maxws_left_for_user_jobs: mmt$page_frame_index,
      max_idle_candidate: jmt$dispatching_priority,
      blocked_dp_set: jmt$dispatching_priority_set,
      block_lower_prios: boolean,
      dp: jmt$dispatching_priority,
      i: integer,
      idle_candidates: jmt$dispatching_priority_set,
      total_idle: ost$free_running_clock,
      user_dp_set: [STATIC, READ] jmt$dispatching_priority_set := $jmt$dispatching_priority_set
            [7, 8, 9, 10, 11, 12, 13, 14],
      queue: mmt$page_frame_queue_id,
      temp_max_working_set_size: mmt$page_frame_index,
      temp_target: mmt$page_frame_index,
      time_last_idle_dispatching_scan: [STATIC] integer := 5000000,
      time_last_io_error_q_scan: [STATIC] integer := 5000000,
      time_last_shared_queue_scan: [STATIC] integer := 5000000,
      time_last_full_jws_scan: [STATIC] integer := 5000000,
      time_next_free_astes: [STATIC] integer := 0,
      time_next_scan_wait_not_queued: [STATIC] integer := 600000000,
      pass: integer,
      pti: ost$page_table_index,
      asid: ost$asid,
      asti: mmt$ast_index,
      system_jws: mmt$page_frame_index,
      gpqle_p: ^mmt$global_page_queue_list_ent,
      dynamic_aging_disabled: boolean,
      ignore_overrun_mod_q: boolean,
      new_dynamic_increment_count: integer;

    VAR
      idle_dispatching_entry_p: ^jmt$idle_dispatching_entry;

?? NEWTITLE := '[inline] P$GET_DYNAMIC_INCREMENT_COUNT', EJECT ??

{ PURPOSE: Determine Dynamic Aging working limits
{
{   We try to keep the reassignable pages around MMV$DYNAMIC_AVAILABLE_FLOOR.
{   When available pages goes higher than MMV$DYNAMIC_AVAILABLE_FLOOR, we will
{   give up to the MMV$DYNAMIC_AVAILABLE_MAX limit extra pages to the shared Qs.
{   We will take up to the MMV$DYNAMIC_AVAILABLE_MIN limit extra pages from the
{   shared Qs, when we drop below MMV$DYNAMIC_AVAILABLE_FLOOR with
{   the "soon be available" pages.
{
{   We do not give extra pages to the Qs nor take pages away, when reassignable pages
{   falls below MMV$DYNAMIC_AVAILABLE_FLOOR and soon be available pages remains
{   higher than MMV$DYNAMIC_AVAILABLE_FLOOR.
{
{   This situation means that we have a high number of modified pages which are being
{   written out to disk. Soon, we will have enough pages available.
{   If we would take away too many extra pages, we would have the following impacts:
{     - we would fill the available modified Q even more and unnecessarily saturate
{       the IO Q, which would slow down the response time.
{     - we would end up with too many available pages in the next aging cycle and
{       therefore boost up the shared Qs. This could cause the Qs to oscillate.
{
{   We do not give more than MMV$DYNAMIC_POS_INCREMENT_MAX pages to the Qs nor
{   take more than MMV$DYNAMIC_NEG_INCREMENT_MAX from the Qs for each age interval.
{
{   MMV$DYNAMIC_AVAIL_EMERGENCY_MIN works as a security break. We take all extra pages
{   away from all shared Qs, when available pages drops below
{   MMV$DYNAMIC_AVAIL_EMERGENCY_MIN.
{   Dynamic aging is also disabled, when aggressive aging takes place or when
{   it's manually deactivated.
{
{   NEW_DYNAMIC_INCREMENT_COUNT is the new increment of total pages we can give
{   or we have to take away from all shared Qs.
{   When NEW_DYNAMIC_INCREMENT_COUNT is negative, we have to take this amount of
{   pages away from the Qs. When it's positive, we can give up to this amount of
{   pages to all Qs.

    PROCEDURE [INLINE] p$get_dynamic_increment_count
      (VAR dynamic_aging_disabled: boolean;
       VAR new_dynamic_increment_count: integer);

{ Emergency brake: when the pages available right now are below the
{ emergency minimum, dynamic aging is disabled entirely.  NOTE that
{ NEW_DYNAMIC_INCREMENT_COUNT is left unchanged on this path; callers are
{ expected to test DYNAMIC_AGING_DISABLED first.

      dynamic_aging_disabled := mmv$reassignable_page_frames.now < mmv$dynamic_avail_emergency_min;

      IF dynamic_aging_disabled THEN
        mmv$dynamic_aging_statistics.emergency_disabled := mmv$dynamic_aging_statistics.emergency_disabled +
              1;
        RETURN; {----->
      IFEND;

      IF mmv$reassignable_page_frames.now > mmv$dynamic_available_floor THEN

{ More pages available now than the floor: give the excess to the shared Qs,
{ bounded by MMV$DYNAMIC_AVAILABLE_MAX and MMV$DYNAMIC_POS_INCREMENT_MAX.

        mmv$dynamic_aging_statistics.free_now_exceeds_floor_count :=
              mmv$dynamic_aging_statistics.free_now_exceeds_floor_count + 1;
        IF mmv$reassignable_page_frames.now > mmv$dynamic_available_max THEN
          new_dynamic_increment_count := mmv$dynamic_available_max - mmv$dynamic_available_floor;
          mmv$dynamic_aging_statistics.free_now_exceeds_max_count :=
                mmv$dynamic_aging_statistics.free_now_exceeds_max_count + 1;
        ELSE
          new_dynamic_increment_count := mmv$reassignable_page_frames.now - mmv$dynamic_available_floor;
        IFEND;
        IF new_dynamic_increment_count > mmv$dynamic_pos_increment_max THEN
          mmv$dynamic_aging_statistics.increment_count_exceeds_max_pos :=
                mmv$dynamic_aging_statistics.increment_count_exceeds_max_pos + 1;
          new_dynamic_increment_count := mmv$dynamic_pos_increment_max;
        IFEND;
        mmv$dynamic_aging_statistics.increment_count_pos :=
              mmv$dynamic_aging_statistics.increment_count_pos + new_dynamic_increment_count;

      ELSEIF mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon >=
            mmv$dynamic_available_floor THEN

{ Below the floor now, but pages soon to be available will bring us back to
{ the floor: neither give nor take pages this interval.

        mmv$dynamic_aging_statistics.free_soon_exceeds_floor_count :=
              mmv$dynamic_aging_statistics.free_soon_exceeds_floor_count + 1;
        new_dynamic_increment_count := 0;

      ELSE

{ Genuinely short of pages: take pages back from the shared Qs (a negative
{ increment), bounded by MMV$DYNAMIC_AVAILABLE_MIN and
{ MMV$DYNAMIC_NEG_INCREMENT_MAX.

        mmv$dynamic_aging_statistics.floor_exceeds_free_now_count :=
              mmv$dynamic_aging_statistics.floor_exceeds_free_now_count + 1;
        IF mmv$reassignable_page_frames.now < mmv$dynamic_available_min THEN
          new_dynamic_increment_count := mmv$dynamic_available_min - mmv$dynamic_available_floor;
          mmv$dynamic_aging_statistics.min_exceeds_free_now_count :=
                mmv$dynamic_aging_statistics.min_exceeds_free_now_count + 1;
        ELSE
          new_dynamic_increment_count := mmv$reassignable_page_frames.now - mmv$dynamic_available_floor;
        IFEND;
        IF new_dynamic_increment_count < mmv$dynamic_neg_increment_max THEN
          new_dynamic_increment_count := mmv$dynamic_neg_increment_max;
          mmv$dynamic_aging_statistics.increment_count_exceeds_max_neg :=
                mmv$dynamic_aging_statistics.increment_count_exceeds_max_neg + 1;
        IFEND;
        mmv$dynamic_aging_statistics.increment_count_neg :=
              mmv$dynamic_aging_statistics.increment_count_neg + new_dynamic_increment_count;
      IFEND;

    PROCEND p$get_dynamic_increment_count;
?? OLDTITLE ??
?? NEWTITLE := '[inline] P$RESET_DYNAMIC_AGING', EJECT ??

{PURPOSE:
{   Reset Dynamic Aging.
{   All actual values are set back to the nominal values.
{
{ NOTE: We have to loop through all Site Defined Shared Queues and not only
{       through the active Qs, as we do not know, if a Q was ever active
{       between two calls to this request.
{       This should be a very rare case, anyway.
{
{  When this procedure needs to be changed, do not forget to also update
{  p$reset_dynamic_aging in MMM$RING1_HELPER

    PROCEDURE p$reset_dynamic_aging;

      VAR
        q: mmt$page_frame_queue_id;

{ Turn dynamic aging off and restore every site-defined shared queue to its
{ nominal aging attributes.  All shared queues are visited, not only the
{ active ones, because a queue may have been active since the last reset.

      mmv$dynamic_aging_in_use := FALSE;
      mmv$dynamic_aging_statistics.reset_count := mmv$dynamic_aging_statistics.reset_count + 1;

      FOR q := mmc$pq_shared_first TO mmc$pq_shared_last DO
        mmv$gpql [q].queue_cyclic_age := 0;
        mmv$gpql [q].minimum_actual := mmv$gpql [q].minimum_nominal;
        mmv$gpql [q].maximum_actual := mmv$gpql [q].maximum_nominal;
        mmv$gpql [q].maximum_actual_cycle := mmv$gpql [q].maximum_nominal;
        mmv$gpql [q].age_interval_actual_modified := mmv$gpql [q].age_interval_nominal_modified;
        mmv$gpql [q].age_interval_actual_unmodified := mmv$gpql [q].age_interval_nominal_unmodified;
      FOREND;

    PROCEND p$reset_dynamic_aging;
?? OLDTITLE ??
?? NEWTITLE := '[inline] P$UPDATE_AGE_INTERVAL', EJECT ??

    PROCEDURE [INLINE] p$update_age_interval
      (    max: 0 .. 255;
           min: 0 .. 255;
           inc: -255 .. 255;
       VAR statistic: integer;
       VAR actual: 0 .. 255);

      VAR
        delta: integer,
        new_value: integer;

{ Apply INC to ACTUAL, clamping the result into the range MIN .. MAX (the
{ MAX bound is tested first), and accumulate the delta actually applied
{ into STATISTIC.

      new_value := actual + inc;
      IF new_value > max THEN
        new_value := max;
      ELSEIF new_value < min THEN
        new_value := min;
      IFEND;

      delta := new_value - actual;
      statistic := statistic + delta;
      actual := actual + delta;

    PROCEND p$update_age_interval;
?? OLDTITLE ??
?? NEWTITLE := '[inline] P$UPDATE_LIMIT', EJECT ??

    PROCEDURE [INLINE] p$update_limit
      (    new_dynamic_increment_count: integer;
           max: 0 .. osc$max_page_frames;
           min: 0 .. osc$max_page_frames;
           inc: -osc$max_page_frames .. osc$max_page_frames;
           inc_pc: 0 .. osc$max_page_frames;
       VAR statistic: integer;
       VAR actual: 0 .. osc$max_page_frames);

      VAR
        delta: integer,
        new_limit: integer;

{ Apply INC plus this queue's percentage share (INC_PC) of
{ NEW_DYNAMIC_INCREMENT_COUNT to ACTUAL, clamping the result into the range
{ MIN .. MAX (the MAX bound is tested first), and accumulate the delta
{ actually applied into STATISTIC.

      new_limit := actual + inc + (new_dynamic_increment_count * inc_pc DIV 100);
      IF new_limit > max THEN
        new_limit := max;
      ELSEIF new_limit < min THEN
        new_limit := min;
      IFEND;

      delta := new_limit - actual;
      statistic := statistic + delta;
      actual := actual + delta;

    PROCEND p$update_limit;
?? OLDTITLE ??
?? NEWTITLE := '[inline] P$UPDATE_AGING_ATTRIBUTES', EJECT ??

{PURPOSE:
{  - Set mmv$dynamic_aging_in_use
{  - Reset the Q cyclic age to 0
{  - Adjust the Q aging attributes in the following manner:
{    - The actual AICs are updated by the incremental value in the range of
{      the Min or Max values.
{    - The actual Minimum and Maximum limits are updated by the incremental
{      value and the percentage value of new_dynamic_increment_count in the
{      range of the Min and Max values.
{    - maximum_actual_cycle value is set to maximum_actual.

    PROCEDURE p$update_aging_attributes
      (    new_dynamic_increment_count: integer;
       VAR q_statistic: mmt$dynamic_aging_q_statistic;
       VAR entry: mmt$global_page_queue_list_ent);

{ Mark dynamic aging in use, restart the queue's cyclic age, and adjust the
{ queue's actual aging attributes (the modified/unmodified age intervals and
{ the minimum/maximum limits) by the positive or negative increments,
{ depending on the sign of NEW_DYNAMIC_INCREMENT_COUNT.  The limit updates
{ also apply the queue's percentage share of NEW_DYNAMIC_INCREMENT_COUNT.
{ (Unused local COUNT removed; the common assignment of
{ MAXIMUM_ACTUAL_CYCLE has been hoisted out of both branches.)

      mmv$dynamic_aging_in_use := TRUE;
      entry.queue_cyclic_age := 0;
      IF new_dynamic_increment_count > 0 THEN
        p$update_age_interval (entry.age_interval_modified_max, entry.age_interval_modified_min,
              entry.age_interval_pos_inc_modified, q_statistic.aic_mod_increment_total,
              entry.age_interval_actual_modified);
        p$update_age_interval (entry.age_interval_unmodified_max, entry.age_interval_unmodified_min,
              entry.age_interval_pos_inc_unmodified, q_statistic.aic_unmod_increment_total,
              entry.age_interval_actual_unmodified);
        p$update_limit (new_dynamic_increment_count, entry.minimum_max, entry.minimum_min,
              entry.minimum_pos_inc, entry.minimum_pos_inc_pc, q_statistic.min_increment_total,
              entry.minimum_actual);
        p$update_limit (new_dynamic_increment_count, entry.maximum_max, entry.maximum_min,
              entry.maximum_pos_inc, entry.maximum_pos_inc_pc, q_statistic.max_increment_total,
              entry.maximum_actual);
      ELSE
        p$update_age_interval (entry.age_interval_modified_max, entry.age_interval_modified_min,
              -entry.age_interval_neg_inc_modified, q_statistic.aic_mod_decrement_total,
              entry.age_interval_actual_modified);
        p$update_age_interval (entry.age_interval_unmodified_max, entry.age_interval_unmodified_min,
              -entry.age_interval_neg_inc_unmodified, q_statistic.aic_unmod_decrement_total,
              entry.age_interval_actual_unmodified);
        p$update_limit (new_dynamic_increment_count, entry.minimum_max, entry.minimum_min,
              -entry.minimum_neg_inc, entry.minimum_neg_inc_pc, q_statistic.min_decrement_total,
              entry.minimum_actual);
        p$update_limit (new_dynamic_increment_count, entry.maximum_max, entry.maximum_min,
              -entry.maximum_neg_inc, entry.maximum_neg_inc_pc, q_statistic.max_decrement_total,
              entry.maximum_actual);
      IFEND;

{ The per-cycle maximum restarts from the newly computed actual maximum.

      entry.maximum_actual_cycle := entry.maximum_actual;

    PROCEND p$update_aging_attributes;
?? OLDTITLE ??
?? NEWTITLE := '[inline] P$UPDATE_MAXIMUM_CYCLE', EJECT ??

{PURPOSE:
{ Update Maximum Cycle for non-selected aging cycles.
{   We adjust the Q aging attributes in the following manner:
{     - The maximum_actual_cycle limit is updated by the incremental
{       value and the percentage value of new_dynamic_increment_count in the
{       range of the Min and Max values.

    PROCEDURE p$update_maximum_cycle
      (    new_dynamic_increment_count: integer;
       VAR entry: mmt$global_page_queue_list_ent);

      VAR
        dummy_statistic: integer;

{ Recompute the per-cycle maximum for a queue that was not selected in this
{ aging cycle: start from the actual maximum and apply the signed increment
{ via P$UPDATE_LIMIT, discarding the statistic delta.

      dummy_statistic := 0;
      mmv$dynamic_aging_in_use := TRUE;
      entry.maximum_actual_cycle := entry.maximum_actual;
      IF new_dynamic_increment_count > 0 THEN
        p$update_limit (new_dynamic_increment_count, entry.maximum_max, entry.maximum_min,
              entry.maximum_pos_inc, entry.maximum_pos_inc_pc, dummy_statistic,
              entry.maximum_actual_cycle);
      ELSE
        p$update_limit (new_dynamic_increment_count, entry.maximum_max, entry.maximum_min,
              -entry.maximum_neg_inc, entry.maximum_neg_inc_pc, dummy_statistic,
              entry.maximum_actual_cycle);
      IFEND;

    PROCEND p$update_maximum_cycle;
?? OLDTITLE ??
?? EJECT ??

{Age the shared and job working set queues if necessary:
{    shared - aged every few seconds
{    job (algorithm 0) - aged every few seconds and all pages not referenced since last time
{                        are removed
{    job (algorithm 1) - every few seconds a scan is made of each job. If job has used a TICKTIME of
{                        cp time, the working set is aged same as though a page fault occurred.
{***NOTE - if system is real low on free/avail pages, aging is forced even if not necessary.

{ Set the global maximum working set to the number of pages available to user jobs.
{ The size of a job's working set will be constrained to the lesser of the global maximum working set
{ and the maximum working set size value in the job control block, which is determined by the class attribute.
{ The value for the global maximum working set is the largest working set the system can accommodate.  Since
{ the System Job is also limited by the global maximum working set (mmv$max_working_set_size) the System Job
{ is effectively limited to 50% of the available space.  If the System Job is the only job, the maximum is
{ calculated as the maximum space available to the system job (although it is unlikely to grow that large).

    IF jmv$max_class_working_set = 0 THEN { =0 when system job is the only job.}
      system_jws := 0;
    ELSE
      system_jws := f$system_jobs_working_set ();
    IFEND;
    maxws_left_for_user_jobs := mmv$total_page_frames - mmv$gpql [mmc$pq_wired].
          pqle.count - mmv$gpql [mmc$pq_shared_io_error].pqle.count - mmv$gpql [mmc$pq_flawed].pqle.count -
          system_jws;
    FOR queue := mmc$pq_shared_first TO mmv$last_active_shared_queue DO
      maxws_left_for_user_jobs := maxws_left_for_user_jobs - mmv$gpql [queue].pqle.count;
    FOREND;
    IF jmv$max_class_working_set < maxws_left_for_user_jobs THEN
      temp_max_working_set_size := jmv$max_class_working_set;
    ELSE
      temp_max_working_set_size := maxws_left_for_user_jobs;
    IFEND;
    temp_target := (temp_max_working_set_size * jsv$swapped_page_entry_size DIV
          osv$page_size + 1) + mmv$aggressive_aging_level;
    IF temp_target > jmv$job_scheduler_table.scheduling_memory_levels.target THEN
      mmv$resident_job_target := temp_target;
    ELSE
      mmv$resident_job_target := jmv$job_scheduler_table.scheduling_memory_levels.target;
    IFEND;
    mmv$max_working_set_size := maxws_left_for_user_jobs - mmv$resident_job_target;
    IF mmv$max_working_set_size < 10 THEN
      mmv$max_working_set_size := 10;
    IFEND;

{ Check for "idle dispatching".  The basic algorithm is that if there are high priority CPU bound
{ jobs, mark the lower dispatching priorities as blocked in the idle dispatching controls.
{ Jobs with blocked dispatching priorities will be swapped out; the memory freed by swapping the
{ jobs will increase the size of the available queue.
{ Because cpu dispatching allocation circumvents the highest-dispatching-priority-first selection
{ algorithm, deciding which dispatching priority is "lower" gets a little tricky.  Cpu dispatching
{ allocation is not intended to reorder dispatching priorities though, so the idle dispatching
{ algorithm will not block a priority if a lower priority cannot be blocked because of dispatching
{ allocation.  Dispatching priorities which have been allocated a minimum percent of the CPU will
{ not be blocked.
{ Whenever there is any idle CPU time, all blocked priorities will be cleared.
{ NOTE:  A dispatching priority in a SET is converted so that the highest dispatching
{ priority in the SET corresponds to the leftmost bit in the SET. (See jmt$dispatching_priority.)

    clock := #FREE_RUNNING_CLOCK (0);
    IF (clock - time_last_idle_dispatching_scan) > jmv$scan_idle_dispatch_interval THEN
      total_idle := 0;
      FOR i := 0 TO (osv$cpus_physically_configured - 1) DO
        IF mtv$cst0 [i].processor_state = cmc$on THEN
          total_idle := total_idle + mtv$cst0 [i].cpu_idle_statistics.idle_no_io_active +
                mtv$cst0 [i].cpu_idle_statistics.idle_io_active;
        IFEND;
      FOREND;
      IF (total_idle > jmv$idle_dispatching_controls.controls [0].last_cp_time) OR
            ((jmv$idle_dispatching_controls.unblocked_priorities *
            tmv$dispatching_control_sets.ready_tasks) - jmv$idle_dispatching_controls.maximums_exceeded =
            $jmt$dispatching_priority_set []) THEN

{ Unblock all idled dispatching priorities.

        jmv$idle_dispatching_controls.unblocked_priorities := user_dp_set;
        FOR dp := jmc$priority_p1 TO jmc$priority_p8 DO
{Note, it's faster to not use the pointer
          jmv$idle_dispatching_controls.controls [dp].blocked := FALSE;
          jmv$idle_dispatching_controls.controls [dp].idle_noticed_once := FALSE;
          jmv$idle_dispatching_controls.controls [dp].timestamp := clock;
          jmv$idle_dispatching_controls.controls [dp].last_cp_time :=
                tmv$cpu_execution_statistics [dp].time_spent_in_job_mode +
                tmv$cpu_execution_statistics [dp].time_spent_in_mtr_mode;
        FOREND;
        jmp$set_scheduler_event (jmc$examine_swapin_queue);
        jmp$set_scheduler_event (jmc$examine_input_queue);
      ELSE

{ Check for blocked priorities.  NOTE: P1 = 2 = minimum_dispatching_priority.
{ Idle candidates are priorities BELOW the lowest minimum-to-satisfy priority.
{ If any CPU time has been used by the priority, update the time used and the timestamp.
{ If no CPU time has been used since a previous check, idle the priority if the
{ idle_dispatching_queue_time has been exceeded.

        idle_candidates := (user_dp_set - tmv$dispatching_controls.minimums_to_satisfy);

        max_idle_candidate := jmc$priority_p1;
        WHILE ((jmc$dp_conversion - max_idle_candidate) IN idle_candidates) DO
          max_idle_candidate := max_idle_candidate + 1;
        WHILEND;

        block_lower_prios := FALSE;
        FOR dp := (max_idle_candidate - 1) DOWNTO jmc$priority_p1 DO
          idle_dispatching_entry_p := ^jmv$idle_dispatching_controls.controls [dp];
          IF (idle_dispatching_entry_p^.last_cp_time <> (tmv$cpu_execution_statistics [dp].
                time_spent_in_job_mode + tmv$cpu_execution_statistics [dp].time_spent_in_mtr_mode)) AND
                (NOT block_lower_prios) THEN

            idle_dispatching_entry_p^.last_cp_time := tmv$cpu_execution_statistics [dp].
                  time_spent_in_job_mode + tmv$cpu_execution_statistics [dp].time_spent_in_mtr_mode;
            idle_dispatching_entry_p^.timestamp := clock;
            idle_dispatching_entry_p^.idle_noticed_once := FALSE;

          ELSE
            IF (((jmc$dp_conversion - dp) IN tmv$dispatching_control_sets.ready_tasks) AND
                  ((idle_dispatching_entry_p^.timestamp + jmv$job_scheduler_table.
                  idle_dispatching_queue_time) < clock)) OR (block_lower_prios) THEN

              IF idle_dispatching_entry_p^.idle_noticed_once THEN
                idle_dispatching_entry_p^.blocked := TRUE;
                jmv$idle_dispatching_controls.unblocked_priorities :=
                      jmv$idle_dispatching_controls.unblocked_priorities -
                      $jmt$dispatching_priority_set [jmc$dp_conversion - dp];
              ELSE
                idle_dispatching_entry_p^.idle_noticed_once := TRUE;
              IFEND;
              block_lower_prios := TRUE;
            ELSE
              idle_dispatching_entry_p^.idle_noticed_once := FALSE;
            IFEND;
          IFEND;
        FOREND;

      IFEND;
      jmv$idle_dispatching_controls.maximums_exceeded := $jmt$dispatching_priority_set [];
      jmv$idle_dispatching_controls.controls [0].last_cp_time := total_idle;
      jmv$idle_dispatching_controls.controls [0].timestamp := clock;
      time_last_idle_dispatching_scan := clock;
    IFEND; {time to scan idle dispatching}

{ Insert timed_wait_not_queued tasks into the timed wait queue if it is nearly time for them to be readied.

    clock := #FREE_RUNNING_CLOCK (0);
    IF clock >= time_next_scan_wait_not_queued THEN
      time_next_scan_wait_not_queued := clock + tmv$timed_wait_not_queued;
      tmp$check_timed_wait_not_queued (time_next_scan_wait_not_queued);
    IFEND;


    mmp$maintain_memory_thresholds;

    aggressive_aging := mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon <=
          mmv$aggressive_aging_level + jsv$pages_needed_for_sfd;
    IF aggressive_aging THEN
      mmv$aging_statistics.aggressive_age_shared_queue := mmv$aging_statistics.aggressive_age_shared_queue +
            1;
    IFEND;

    IF aggressive_aging OR ((#FREE_RUNNING_CLOCK (0) - time_last_shared_queue_scan) >
          mmv$shared_queue_age_interval) THEN

      IF NOT aggressive_aging THEN
{ Set the mmv$page_queue_age_cycle to the next cycle.
        mmv$page_queue_age_cycle := (mmv$page_queue_age_cycle + 1) MOD (mmc$page_queue_age_cycle_max + 1);
      IFEND;

      jsp$adv_expired_swapped_jobs (jsc$isqi_swapped_io_not_init);
      jsp$adv_expired_swapped_jobs (jsc$isqi_swapped_io_completed);

      new_dynamic_increment_count := 0;
      IF mmv$dynamic_aging_enabled THEN
        p$get_dynamic_increment_count (dynamic_aging_disabled, new_dynamic_increment_count);
        IF (aggressive_aging OR dynamic_aging_disabled) AND mmv$dynamic_aging_in_use THEN
          p$reset_dynamic_aging;
        IFEND;
      IFEND;

      FOR queue := mmc$pq_shared_first TO mmv$last_active_shared_queue DO
        mcount := 0;
        rcount := 0;
        scount := 0;
        gpqle_p := ^mmv$gpql [queue];

{Here, we could check for aggressive level 1 or 2. For level 2, we could pass a special AIC
{and a special minimum. For level 1, we could, like now, pass 0 as the min - but what about
{the AIC?

        IF aggressive_aging THEN
          mmp$remove_stale_pages (gpqle_p^.pqle, gpqle_p^.age_interval_actual_modified,
                gpqle_p^.age_interval_actual_unmodified, NIL, NIL, mmc$pq_avail_modified, 0, mcount, rcount,
                scount);

        ELSEIF mmv$page_queue_age_cycle IN gpqle_p^.age_cycles THEN
          IF new_dynamic_increment_count <> 0 THEN { is 0 when disabled.

            IF (new_dynamic_increment_count > 0) AND
{         } (gpqle_p^.minimum_actual >= gpqle_p^.encrease_min_enable_dynamic_age) AND
{         } (gpqle_p^.pqle.count <= gpqle_p^.minimum_actual + gpqle_p^.encrease_min_enable_dynamic_age) THEN

              mmv$dynamic_aging_statistics.queue [queue].size_non_encrease_count :=
                    mmv$dynamic_aging_statistics.queue [queue].size_non_encrease_count + 1;

              IF gpqle_p^.queue_cyclic_age >= gpqle_p^.queue_age_interval THEN
                p$update_aging_attributes (-new_dynamic_increment_count, mmv$dynamic_aging_statistics.
                      queue [queue], gpqle_p^);
              ELSE
                gpqle_p^.queue_cyclic_age := gpqle_p^.queue_cyclic_age + 1;
              IFEND;
            ELSE

              p$update_aging_attributes (new_dynamic_increment_count, mmv$dynamic_aging_statistics.
                    queue [queue], gpqle_p^);
            IFEND;
          IFEND;

          mmp$remove_stale_pages (gpqle_p^.pqle, gpqle_p^.age_interval_actual_modified,
                gpqle_p^.age_interval_actual_unmodified, NIL, NIL, mmc$pq_avail_modified,
                gpqle_p^.minimum_actual, mcount, rcount, scount);

        ELSEIF new_dynamic_increment_count <> 0 THEN { is 0 when disabled.
          p$update_maximum_cycle (new_dynamic_increment_count, gpqle_p^);
        IFEND;

{ Trim the shared queue down to maximum size; page faulting for a shared page does not automatically trim the
{ shared queue.  A NIL ijl pointer is passed to the remove procedure; the ijl pointer is used only if the file
{ is a job file.  Pages in the shared queues cannot belong to a job file (the shared io error queue is an
{ exception).

        pfti := gpqle_p^.pqle.link.bkw;
        relink_page_reject_count := 0; { For now, we start over again for each Q
        WHILE (gpqle_p^.pqle.count > gpqle_p^.maximum_actual_cycle) AND (pfti <> 0) AND
              (relink_page_reject_count <= c$relink_page_reject_count_max) DO
          pfte_p := ^mmv$pft_p^ [pfti];
          last_pfti := pfti;
          pfti := pfte_p^.link.bkw;
          mmp$remove_page_from_jws (last_pfti, NIL {ijle_p} , FALSE {= relink when Avail Mod Q Max} , mcount1,
                rcount1, relink_status);
          IF relink_status = mmc$rps_avail_mod_queue_flooded THEN
            relink_page_reject_count := relink_page_reject_count + 1;
          IFEND;
          mcount := mcount + mcount1;
          rcount := rcount + rcount1;
        WHILEND;

        mmv$aging_statistics.age_shared_queue [queue].total_pages := mmv$aging_statistics.
              age_shared_queue [queue].total_pages + rcount;
        mmv$aging_statistics.age_shared_queue [queue].modified_pages :=
              mmv$aging_statistics.age_shared_queue [queue].modified_pages + mcount;
        mmv$aging_statistics.age_shared_queue [queue].pages_scanned := mmv$aging_statistics.
              age_shared_queue [queue].pages_scanned + scount;
      FOREND;

      time_last_shared_queue_scan := #FREE_RUNNING_CLOCK (0);
    IFEND;

    IF ((#FREE_RUNNING_CLOCK (0) - time_last_io_error_q_scan) > mmv$io_error_q_age_interval) THEN
      mmp$remove_stale_pages (mmv$gpql [mmc$pq_shared_io_error].pqle,
            mmv$gpql [mmc$pq_shared_io_error].age_interval_actual_modified,
            mmv$gpql [mmc$pq_shared_io_error].age_interval_actual_unmodified, NIL, NIL, mmc$pq_avail_modified,
            0, mcount, rcount, scount); {enforces "0" as minimum size
      time_last_io_error_q_scan := #FREE_RUNNING_CLOCK (0);
    IFEND;

    aggressive_aging := mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon <=
          mmv$aggressive_aging_level + jsv$pages_needed_for_sfd;
    IF aggressive_aging THEN
      mmv$aging_statistics.aggressive_age_job_queues := mmv$aging_statistics.aggressive_age_job_queues + 1;
    IFEND;
    IF aggressive_aging OR mmv$reduce_jws_for_thrashing OR
          ((#FREE_RUNNING_CLOCK (0) - time_last_full_jws_scan) > mmv$jws_queue_age_interval) THEN

      FOR ajlo := LOWERBOUND (jmv$ajl_p^) TO jmv$max_ajl_ordinal_in_use DO
        tmp$set_lock (tmv$ptl_lock{, mtc$ignore});
        IF (jmv$ajl_p^ [ajlo].in_use <> 0) AND (jmv$ajl_p^ [ajlo].ijle_p <> NIL) THEN
          IF (jmv$ajl_p^ [ajlo].ijle_p^.swap_status = jmc$iss_executing) THEN

            jmv$ajl_p^ [ajlo].in_use := jmv$ajl_p^ [ajlo].in_use + jmc$lock_ajl;
            tmp$clear_lock (tmv$ptl_lock);
            ijle_p := jmv$ajl_p^ [ajlo].ijle_p;
            IF ijle_p^.maxws_aio_slowdown_display > 0 THEN
              ijle_p^.maxws_aio_slowdown_display := ijle_p^.maxws_aio_slowdown_display - 1;
            IFEND;
            IF mmv$aging_algorithm >= 4 THEN
              cptime := ijle_p^.statistics.cp_time.time_spent_in_job_mode;
            ELSE
              cptime := ijle_p^.statistics.cp_time.time_spent_in_job_mode +
                    ijle_p^.statistics.cp_time.time_spent_in_mtr_mode;
            IFEND;
            jcb_p := #ADDRESS (1, mtc$job_fixed_segment + ajlo, 0);
            IF jcb_p^.next_cyclic_aging_time < #FREE_RUNNING_CLOCK (0) THEN
              mmp$remove_stale_pages (ijle_p^.job_page_queue_list [mmc$pq_job_working_set], 1, 1, jcb_p,
                    ijle_p, mmc$pq_avail_modified, jcb_p^.min_working_set_size, mcount, rcount, scount);
              mmp$remove_stale_pages (ijle_p^.job_page_queue_list [mmc$pq_job_io_error], 30, 30, jcb_p,
                    ijle_p, mmc$pq_avail_modified, 0, mcount, rcount, scount);
            IFEND;
            IF (ijle_p^.swap_status = jmc$iss_executing) AND (ijle_p^.entry_status =
                  jmc$ies_job_in_memory) AND (NOT jmv$ajl_p^ [ajlo].job_is_good_swap_candidate) THEN
              IF ((#FREE_RUNNING_CLOCK (0) - jcb_p^.last_execution_time) > tmv$long_wait_force_swap_time) AND
                    (ijle_p^.statistics.ready_task_count = 0) THEN
                tmp$check_for_swapout_candidate (ajlo);
              ELSEIF jmv$idle_dispatching_controls.controls [ijle_p^.scheduling_dispatching_priority].
                    blocked THEN
                tmp$idle_non_dispatchable_job (ajlo);
              IFEND;
            IFEND;

{Note: mmp$age_job_working_set: I believe we actually could idle or dequeue the job when we overrun
{      the Q, but first we need to know more

            IF aggressive_aging THEN
              mmp$age_job_working_set (ijle_p, jcb_p, FALSE, ignore_overrun_mod_q);
            ELSEIF cptime > (jcb_p^.cptime_next_age_working_set + 2 * jcb_p^.page_aging_interval) THEN
              mmv$aging_statistics.age_cp_bound_job := mmv$aging_statistics.age_cp_bound_job + 1;
              mmp$age_job_working_set (ijle_p, jcb_p, FALSE, ignore_overrun_mod_q);
            IFEND;
            jmp$unlock_ajl (ijle_p);
          ELSE
            tmp$clear_lock (tmv$ptl_lock);
          IFEND;

        ELSE
          tmp$clear_lock (tmv$ptl_lock);
        IFEND;
      FOREND;
      time_last_full_jws_scan := #FREE_RUNNING_CLOCK (0);
      mmv$reduce_jws_for_thrashing := FALSE;
    IFEND;


{Call replenish free queue.

    mmp$replenish_free_queues (0);

{Reclaim unused ast entries

    IF mmv$async_work.reclaim_astes THEN
      mmv$async_work.reclaim_astes := FALSE;
      mmp$reclaim_ast_entries (0);
    IFEND;

{Process outstanding page table full conditions.

    IF mmv$async_work.pt_full THEN
      IF mmv$async_work.pt_full_aste_p^.in_use THEN

        IF jmp$ijl_block_valid (mmv$async_work.pt_full_aste_p^.ijl_ordinal) THEN
          mmp$get_inhibit_io_status (mmv$async_work.pt_full_aste_p^.ijl_ordinal, FALSE {lock ajl} ,
                inhibit_io, ijle_p);
        ELSE
          inhibit_io := FALSE;
        IFEND;

        IF NOT inhibit_io THEN
          mmp$process_page_table_full (mmv$async_work.pt_full_sva, new_asid, new_asti, new_aste_p,
                pt_full_status);
        IFEND;
      IFEND;
      mmv$async_work.pt_full := FALSE;
    IFEND;


{If tasks are in the memory wait queue, ready one task. This mechanism is NOT the normal mechanism
{for waking tasks in memory wait. Normally this is done as soon as the memory becomes available. This
{mechanism is a FAIL-SAFE mechanism used 1) in case the task waiting for memory doesn't request it again
{when it is readied from the memory-wait queue, or 2) to give critical tasks memory when severe thrashing occurs.

    IF (mmv$memory_wait_queue.head <> 0) OR (mmv$avail_mod_wait_queue.head <> 0) THEN
      p$dequeue_tasks_on_mem_limits (1 {= we dequeue a maximum of 1 task} );
    IFEND;

    IF osv$keypoint_control.periodic_requested THEN
      osp$process_keypoint_periodic;
    IFEND;

{Update statistics.

    IF mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon <= mmv$aggressive_aging_level THEN
      mmv$aging_statistics.aggressive_aging_failed := mmv$aging_statistics.aggressive_aging_failed + 1;
    IFEND;


*if $true(mmc$debug)
{** Debug code - allow for testing of ASID REASSIGNMENT.

    IF mmv$test_reassign_asid AND (#FREE_RUNNING_CLOCK (0) > time_next_free_astes) THEN
      FOR asti := 1 TO UPPERBOUND (mmv$ast_p^) DO
        aste_p := ^mmv$ast_p^ [asti];
        IF (aste_p^.pages_in_memory = 0) AND aste_p^.in_use THEN
          mmp$asid (asti, asid);
          IF (jmp$ijl_block_valid (mmv$ast_p^ [asti].ijl_ordinal)) AND
                (jmv$ijl_p.block_p^ [mmv$ast_p^ [asti].ijl_ordinal.block_number].index_p^ [mmv$ast_p^ [asti].
                ijl_ordinal.block_index].entry_status <> jmc$ies_entry_free) THEN
            mmp$change_asid (^mmv$ast_p^ [asti], asid, 0, 0);
          IFEND;
          mmp$free_asid (asid, ^mmv$ast_p^ [asti]);
        IFEND;
        IF aste_p^.in_use THEN
          IF aste_p^.sfid.residence = gfc$tr_system THEN
            gfp$mtr_get_locked_fde_p (aste_p^.sfid, NIL, fde_p);
            IF fde_p^.asti <> asti THEN
              mtp$error_stop ('MM - dangling AST entry found');
            IFEND;
          IFEND;
        IFEND;
      FOREND;
      time_next_free_astes := #FREE_RUNNING_CLOCK (0) + 1000000;
    IFEND;
*ifend
*if $true(mmc$debug)

{** DEBUG code - allow for testing PAGE TABLE full.
    FOR pass := 1 TO mmv$test_pt_full DO
      pti := #FREE_RUNNING_CLOCK (0) MOD mmv$pt_length;
      pfti := (mmv$pt_p^ [pti].rma * 512) DIV osv$page_size;
      WHILE (mmv$pt_p^ [pti].pageid.asid = 0) OR (pfti < LOWERBOUND (mmv$pft_p^)) OR
            (pfti > UPPERBOUND (mmv$pft_p^)) OR (mmv$pft_p^ [pfti].aste_p = NIL) OR
            (mmv$pft_p^ [pfti].aste_p^.in_use = FALSE) DO
        pti := pti + 1;
        IF pti = mmv$pt_length THEN
          pti := 0;
        IFEND;
        pfti := (mmv$pt_p^ [pti].rma * 512) DIV osv$page_size;
      WHILEND;
      IF jmp$ijl_block_valid (mmv$pft_p^ [pfti].ijl_ordinal) THEN
        mmp$get_inhibit_io_status (mmv$pft_p^ [pfti].ijl_ordinal, FALSE {lock ajl} , inhibit_io, ijle_p);
      ELSE
        inhibit_io := FALSE;
      IFEND;
      IF NOT inhibit_io THEN
        mmp$process_page_table_full (mmv$pft_p^ [pfti].sva, new_asid, new_asti, new_aste_p, pt_full_status);
      IFEND;
    FOREND;
*ifend

{Reset the time that CP Monitor should next call this procedure.

    mmv$time_to_call_mem_mgr := mmv$periodic_call_interval + #FREE_RUNNING_CLOCK (0);

  PROCEND mmp$periodic_call;
?? TITLE := 'REPLENISH_FREE_QUEUES' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{Name:
{  replenish_free_queues
{Purpose:
{  This routine is called to determine if the number of FREE + AVAILABLE
{  is getting too low.
{
{    asid: (input) If only pages belonging to a specific ASID should be written then this parameter
{        specifies the ASID. If ALL ASIDs should be written then a 0 (zero) is passed.
{
{
{--------------------------------------------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$replenish_free_queues
    (    asid: ost$asid);

    CONST
      max_dm_rejects = 32; { Max distinct ASIDs remembered after failed writes before giving up.

    VAR
      fde_p: gft$locked_file_desc_entry_p,
      write_status: mmt$write_page_to_disk_status,
      pfti: mmt$page_frame_index,
      next_pfti: mmt$page_frame_index,
      ijle_p: ^jmt$initiated_job_list_entry,
      io_id: mmt$io_identifier,
      dm_reject_table: array [1 .. max_dm_rejects] of ost$asid,
      i,
      j: integer,
      write_ok: boolean,
      inhibit_io: boolean;

    i := 0; { Number of entries currently in dm_reject_table.
    io_id.specified := FALSE;

{Move pages from the AVAIL_MODIFIED queue to the AVAIL queue until free pages exceeds the threshold.
{The scan starts at the backward link of the avail modified queue header and follows backward links.

    pfti := mmv$gpql [mmc$pq_avail_modified].pqle.link.bkw;

    WHILE (pfti <> 0) AND ((mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon) <
          mmv$write_aged_out_pages) DO

{A page in the avail modified queue must always belong to an in-use AST entry.

      IF mmv$pft_p^ [pfti].aste_p^.in_use = FALSE THEN
        mtp$error_stop ('MM - replenish found page in AM q with AST free');
      IFEND;

{Give up entirely if the page's file is waiting for recovery; its pages cannot be written now.

      IF mmv$pft_p^ [pfti].aste_p^.sfid.residence = gfc$tr_system_wait_recovery THEN
        RETURN; {----->
      IFEND;
      next_pfti := mmv$pft_p^ [pfti].link.bkw; { Capture the successor before this frame can move.

{Write only modified pages, and only those of the requested ASID (asid = 0 selects all ASIDs).

      IF (mmv$pt_p^ [mmv$pft_p^ [pfti].pti].m) AND ((asid = 0) OR (mmv$pft_p^ [pfti].sva.asid = asid)) THEN
        IF jmp$ijl_block_valid (mmv$pft_p^ [pfti].ijl_ordinal) THEN
          mmp$get_inhibit_io_status (mmv$pft_p^ [pfti].ijl_ordinal, TRUE {lock ajl} , inhibit_io, ijle_p);
        ELSE
          inhibit_io := TRUE; { No valid job list entry; do not attempt the write.
        IFEND;
        IF NOT inhibit_io THEN
          write_ok := TRUE;

{Skip the page if a write for its ASID was already rejected during this call.

        /asid_check/
          FOR j := 1 TO i DO
            IF mmv$pft_p^ [pfti].sva.asid = dm_reject_table [j] THEN
              write_ok := FALSE;
              jmp$unlock_ajl (ijle_p);
              EXIT /asid_check/; {----->
            IFEND;
          FOREND /asid_check/;
          IF write_ok THEN
            gfp$mtr_get_locked_fde_p (mmv$pft_p^ [pfti].aste_p^.sfid, ijle_p, fde_p);
            mmp$write_page_to_disk (fde_p, pfti, ioc$write_page, io_id, mmv$multi_page_write, write_status);
            jmp$unlock_ajl (ijle_p);
            IF write_status = ws_physical_io_reject THEN
              RETURN; {----->
            ELSEIF write_status <> ws_ok THEN

{Remember the failing ASID so later pages of the same segment are skipped; give up when the
{reject table is full.

              i := i + 1;
              dm_reject_table [i] := mmv$pft_p^ [pfti].sva.asid;
              IF i >= max_dm_rejects THEN
                RETURN; {----->
              IFEND;
            IFEND;
          IFEND;
        IFEND;
      IFEND;
      pfti := next_pfti;
    WHILEND;

  PROCEND mmp$replenish_free_queues;
?? TITLE := 'FREE_IMAGE_PAGES' ??
?? EJECT ??

  PROCEDURE [XDCL] mmp$free_image_pages_mtr;

    VAR
      byte_address: integer,
      entry_p: ^ost$page_table_entry,
      frame: mmt$page_frame_index,
      pti: integer;

{ Scan the entire page table.  Every valid entry whose real memory byte address lies between
{ osv$180_memory_limits.deadstart_upper and osv$180_memory_limits.upper is deleted from the
{ page table and its page frame is returned to the free queue with a cleared ASID.

    FOR pti := 0 TO (mmv$pt_length - 1) DO
      entry_p := ^mmv$pt_p^ [pti];
      byte_address := entry_p^.rma * 512; { rma is in units of 512 bytes.
      IF (entry_p^.v) AND (byte_address >= osv$180_memory_limits.deadstart_upper) AND
            (byte_address < osv$180_memory_limits.upper) THEN
        frame := byte_address DIV osv$page_size;
        mmp$delete_pt_entry (frame, TRUE);
        mmv$pft_p^ [frame].queue_id := mmc$pq_free;
        mmv$pft_p^ [frame].sva.asid := 0;
      IFEND;
    FOREND;

  PROCEND mmp$free_image_pages_mtr;
?? TITLE := 'MMP$CREATE_TASK' ??
?? EJECT ??
{----------------------------------------------------------------------------------------------------
{This procedure is called by the Dispatcher when a new task is created to initialize the SDT of the
{new task. Initialization consists of copying the ASIDs of shared segments from the SDT of the parent
{task into the SDT of the new task.  Update the real memory address of the task's SDT in the task's
{exchange package.
{
{    MMP$CREATE_TASK (PARENT_XCB_P, XCB_P, IJLE_P)
{
{----------------------------------------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$create_task
    (    parent_xcb_p: ^ost$execution_control_block;
         xcb_p: ^ost$execution_control_block;
         ijle_p: ^jmt$initiated_job_list_entry);

    VAR
      fde_p: gft$file_desc_entry_p,
      max_segnum: ost$segment,
      parent_sdt_p: mmt$max_sdt_p,
      parent_sdtx_p: mmt$max_sdtx_p,
      sdt_p: mmt$max_sdt_p,
      st_rma: integer,
      sdtx_p: mmt$max_sdtx_p,
      segnum: ost$segment,
      taskid: ost$global_task_id;

{ Get the segment descriptor table (and extension) pointers of the parent and of the new task.

    mmp$get_max_sdt_sdtx_pointer (parent_xcb_p, parent_sdt_p, parent_sdtx_p);
    mmp$get_max_sdt_sdtx_pointer (xcb_p, sdt_p, sdtx_p);

{  Update the RMA of the task's segment table in the task's exchange package.  The address is
{  split into two exchange package fields at the 10000(16) boundary.

    #real_memory_address (sdt_p, st_rma);
    xcb_p^.xp.segment_table_address_1 := st_rma DIV 10000(16);
    xcb_p^.xp.segment_table_address_2 := st_rma MOD 10000(16);

{  Limit the copy loop below to the shorter of the two segment tables.

    IF parent_xcb_p^.xp.segment_table_length > xcb_p^.xp.segment_table_length THEN
      max_segnum := xcb_p^.xp.segment_table_length;
    ELSE
      max_segnum := parent_xcb_p^.xp.segment_table_length;
    IFEND;

{ For performance, try to propagate the ASID/ASTI from the segment table entry of the parent (copy the
{ entire st entry) if the parent and child are both using corresponding segments for the same file
{ (compare sfids).  If the segments are not being used for the same file, the ASID/ASTI in the child's
{ segment table entry will remain zero.  When the child task first page faults for a page of the segment,
{ an ASID will be assigned.

    taskid := xcb_p^.global_task_id;
    FOR segnum := 0 TO max_segnum DO
      IF sdt_p^.st [segnum].ste.vl <> osc$vl_invalid_entry THEN
        IF (parent_sdt_p^.st [segnum].ste.vl <> osc$vl_invalid_entry) AND
              (sdtx_p^.sdtx_table [segnum].sfid = parent_sdtx_p^.sdtx_table [segnum].sfid) THEN
          sdt_p^.st [segnum] := parent_sdt_p^.st [segnum];
        ELSEIF sdtx_p^.sdtx_table [segnum].inheritance = mmc$si_new_segment THEN
          gfp$mtr_get_fde_p (sdtx_p^.sdtx_table [segnum].sfid, ijle_p, fde_p); {No need to lock}
          fde_p^.global_task_id := taskid; { Record the owning task in the file descriptor.
        IFEND;
      IFEND;
    FOREND;

  PROCEND mmp$create_task;

?? TITLE := 'MMP$EXIT_TASK' ??
?? EJECT ??
{----------------------------------------------------------------------------------------------------
{This procedure is called by the dispatcher when a task exits to  free pages and ASIDs assigned
{to task template segments.
{
{    MMP$EXIT_TASK (XCB_P)
{
{----------------------------------------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$exit_task
    (    xcb_p: ^ost$execution_control_block);

    VAR
      fde_p: gft$locked_file_desc_entry_p,
      sdt_p: mmt$max_sdt_p,
      cst_p: ^ost$cpu_state_table,
      sdtx_p: mmt$max_sdtx_p,
      page_count: integer,
      aste_p: ^mmt$active_segment_table_entry,
      sva: ost$system_virtual_address,
      segnum: ost$segment;

    mmp$get_max_sdt_sdtx_pointer (xcb_p, sdt_p, sdtx_p);
    cst_p := mtf$cst_p ();

{ Scan every valid segment of the exiting task.  A segment created for this task
{ (inheritance = mmc$si_new_segment) that has an ASID assigned gives up its pages and
{ its AST connection: the ste and the file descriptor entry are detached, then the
{ segment's pages are freed.

    sva.offset := 0;
    FOR segnum := 0 TO xcb_p^.xp.segment_table_length DO
      IF sdt_p^.st [segnum].ste.vl <> osc$vl_invalid_entry THEN
        IF sdtx_p^.sdtx_table [segnum].inheritance = mmc$si_new_segment THEN
          sva.asid := sdt_p^.st [segnum].ste.asid;
          IF sva.asid <> 0 THEN
            sdt_p^.st [segnum].ste.asid := 0; { Detach the ASID from the segment table entry.
            aste_p := ^mmv$ast_p^ [sdt_p^.st [segnum].asti];
            IF NOT aste_p^.in_use THEN
              mtp$error_stop ('MM - ast not in use');
            IFEND;
            gfp$mtr_get_locked_fde_p (aste_p^.sfid, cst_p^.ijle_p, fde_p);
            fde_p^.asti := 0; { The file no longer references an active segment table entry.
            mmp$mm_free_pages (sva, 7fffffff(16), aste_p, TRUE, page_count);
          IFEND;
        IFEND;
      IFEND;
    FOREND;

  PROCEND mmp$exit_task;

?? TITLE := 'MMP$CREATE_JOB' ??
?? EJECT ??
{----------------------------------------------------------------------------------------------------
{This procedure is called by the Dispatcher when a new job is created to initialize the SDT of the
{new job.
{
{    MMP$CREATE_JOB (NEW_JOB_AJL_ORDINAL, XCB_SEGNUM_RELATIVE_JOBS_AS, PARENT_XCB_P, XCB_P)
{
{! * * * MUST CHANGE WHEN MULTIPLE JOB TEMPLATES ARE SUPPORTED.
{----------------------------------------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$create_job
    (    new_job_ajl_ordinal: jmt$ajl_ordinal;
         xcb_segnum_relative_jobs_as: ost$segment;
         parent_xcb_p: ^ost$execution_control_block;
         xcb_p: ^ost$execution_control_block);

    VAR
      aste_p: ^mmt$active_segment_table_entry,
      fde_p: gft$file_desc_entry_p,
      ijle_p: ^jmt$initiated_job_list_entry,
      jcb_p: ^jmt$job_control_block,
      jf_fde_p: gft$locked_file_desc_entry_p,
      new_job_ijl_ordinal: jmt$ijl_ordinal,
      parent_fde_p: gft$locked_file_desc_entry_p,
      parent_sdt_p: mmt$max_sdt_p,
      parent_sdtx_p: mmt$max_sdtx_p,
      pfti: mmt$page_frame_index,
      sdt_p: mmt$max_sdt_p,
      sdtx_p: mmt$max_sdtx_p,
      segnum: ost$segment,
      sva: ost$system_virtual_address,
      taskid: ost$global_task_id;

{ Get the segment descriptor table (and extension) pointers of the parent and of the new job.

    mmp$get_max_sdt_sdtx_pointer (parent_xcb_p, parent_sdt_p, parent_sdtx_p);
    mmp$get_max_sdt_sdtx_pointer (xcb_p, sdt_p, sdtx_p);

{ Copy the segment table entry of the segment used for the new job fixed in the parent to the new job's
{ job fixed segment table entry.  Fix the ring and cache bypass values; they are not correct in the
{ parent's ste.
{ Invalidate the segment used by the parent.

    sdt_p^.st [osc$segnum_job_fixed_heap] := parent_sdt_p^.st [xcb_segnum_relative_jobs_as];
    sdt_p^.st [osc$segnum_job_fixed_heap].ste.vl := osc$vl_cache_bypass;
    sdt_p^.st [osc$segnum_job_fixed_heap].ste.r2 := 3;
    parent_sdt_p^.st [xcb_segnum_relative_jobs_as].ste.vl := osc$vl_invalid_entry;
    parent_sdt_p^.st [xcb_segnum_relative_jobs_as].ste.asid := 0;
    gfp$mtr_get_locked_fde_p (parent_sdtx_p^.sdtx_table [xcb_segnum_relative_jobs_as].sfid,
          mtf$cst_p ()^.ijle_p, parent_fde_p);
    parent_fde_p^.asti := 0; { The parent's file entry no longer references the AST entry.
    sva.asid := sdt_p^.st [osc$segnum_job_fixed_heap].ste.asid;
    sva.offset := 0;
    #PURGE_BUFFER (osc$purge_all_page_seg_map, sva); { Purge page/segment map entries for the segment.
    new_job_ijl_ordinal := jmv$ajl_p^ [new_job_ajl_ordinal].ijl_ordinal;
    ijle_p := jmf$ijle_p (new_job_ijl_ordinal);

{ Copy template segments from the parent (system job task) to the new job's job monitor task.

    taskid := xcb_p^.global_task_id;
    FOR segnum := 0 TO mmv$max_template_segment_number DO
      IF (sdt_p^.st [segnum].ste.vl <> osc$vl_invalid_entry) THEN

{ The second clause of the following IF statement is to prevent the copying of
{ "shared" segments when executing within a multiple job template.

        IF (sdtx_p^.sdtx_table [segnum].open_validating_ring_number = 0) AND
              (sdtx_p^.sdtx_table [segnum].sfid = parent_sdtx_p^.sdtx_table [segnum].sfid) THEN
          sdt_p^.st [segnum] := parent_sdt_p^.st [segnum];
        ELSE
          gfp$mtr_get_fde_p (sdtx_p^.sdtx_table [segnum].sfid, ijle_p, fde_p); {No need to lock}
          fde_p^.global_task_id := taskid; { Record the owning task in the file descriptor.
        IFEND;
      IFEND;
    FOREND;

{  Move the job fixed segment of the new job to the job queue of that job.

    aste_p := ^mmv$ast_p^ [sdt_p^.st [osc$segnum_job_fixed_heap].asti];
    aste_p^.sfid := sdtx_p^.sdtx_table [osc$segnum_job_fixed_heap].sfid;
    aste_p^.queue_id := mmc$pq_job_fixed;
    aste_p^.ijl_ordinal := new_job_ijl_ordinal;
    gfp$mtr_get_locked_fde_p (aste_p^.sfid, ijle_p, jf_fde_p);
    jf_fde_p^.last_segment_number := osc$segnum_job_fixed_heap;
    jf_fde_p^.global_task_id := xcb_p^.global_task_id;
    jf_fde_p^.asti := sdt_p^.st [osc$segnum_job_fixed_heap].asti;
    jf_fde_p^.eoi_byte_address := gfc$fde_table_base + osv$page_size;
    jcb_p := #ADDRESS (1, mtc$job_fixed_segment + ijle_p^.ajl_ordinal, 0);
    jcb_p^.next_cyclic_aging_time := #FREE_RUNNING_CLOCK (0) + jcb_p^.cyclic_aging_interval;
    ijle_p^.job_fixed_asid := sva.asid;

{  Relink each page frame of the job fixed segment: first to the wired queue, then - after
{  stamping the frame with the new job's ijl ordinal - into the new job's job fixed queue.

    mmp$initialize_find_next_pfti (sva, 7ffffff0(16), include_partial_pages, psc_all, aste_p, pfti);

  /relink_job_fixed_in_new_queue/
    WHILE pfti <> 0 DO
      mmp$relink_page_frame (pfti, mmc$pq_wired);
      mmv$pft_p^ [pfti].ijl_ordinal := new_job_ijl_ordinal;
      mmp$relink_page_frame (pfti, mmc$pq_job_fixed);
      mmp$find_next_pfti (pfti);
    WHILEND /relink_job_fixed_in_new_queue/;

  PROCEND mmp$create_job;
?? TITLE := 'MMP$EXIT_JOB' ??
?? EJECT ??
{----------------------------------------------------------------------------------------------------
{This procedure is called by the dispatcher when a job exits to  free pages and ASIDs assigned
{to non-inherited segments.
{
{        MMP$EXIT_JOB (XCB_P)
{
{ XCB_P: (input) This parameter is a pointer to the execution control block
{        of the job exiting.
{
{----------------------------------------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$exit_job
    (    xcb_p: ^ost$execution_control_block);

{ Free all memory linked in the exiting job's page queues.  The job is identified through the
{ current CPU state table entry's ijle_p; xcb_p is not referenced by this body.

    mmp$free_memory_in_job_queues (mtf$cst_p ()^.ijle_p^.job_page_queue_list, TRUE, FALSE, TRUE);

  PROCEND mmp$exit_job;

?? TITLE := 'MMP$MTR_LOCK_RING_1_STACK' ??
?? EJECT ??

{ PURPOSE:
{   This procedure is the monitor part of the process necessary to free a job's ring one stack at termination.
{ DESIGN:
{   This procedure changes the ring one stack to a transient file, and returns the disk file descriptor offset
{   (if there is one) to job mode.  Job mode will then free the disk space associated with the ring one stack.


  PROCEDURE [XDCL] mmp$mtr_lock_ring_1_stack
    (VAR request_block: mmt$rb_lock_ring_1_stack;
         cst_p: ^ost$cpu_state_table);

    VAR
      aste_p: ^mmt$active_segment_table_entry,
      count: 1 .. 32,
      fde_p: gft$locked_file_desc_entry_p,
      found: boolean,
      ipti: integer,
      page_count_freed: integer,
      pointer {CYBIL trick} : ptr_type,
      ste_p: ^mmt$segment_descriptor,
      stxe_p: ^mmt$segment_descriptor_extended,
      sva: ost$system_virtual_address;

    request_block.status.normal := TRUE;

{ Verify that the ring 1 stack page (an assumption is made that the stack is on one page) is valid in memory.
{ The only way it is not in memory is that it was freed just after the monitor request was issued from ring 1.
{ That is unlikely to happen, so if it has simply reissue the request.  This will cause the job to return to
{ ring 1, reference and get back its ring 1 stack page, and call monitor again.
{ The ring 1 stack must be valid in memory when the file is changed to transient.  If the stack has been
{ written to disk and freed, the job will not be able to page fault and get the page back from disk when it
{ returns.  Instead a new (zeroed out) page would be assigned, which the job cannot return to.

    pointer.pva := cst_p^.xcb_p^.xp.tos_registers [1].pva; { TOS register 1 addresses the ring 1 stack.

    mmp$convert_pva (pointer.p, cst_p, sva, fde_p, aste_p, ste_p, stxe_p);
    #HASH_SVA (sva, ipti, count, found);
    IF NOT found OR NOT mmv$pt_p^ [ipti].v THEN
      request_block.status.normal := FALSE; { Not valid in memory; ring 1 must reissue (see above).
      RETURN; {----->
    IFEND;

{ Free any pages of the ring 1 stack beyond top of stack that may still be around.  The pages
{ of concern are the pages in the available modified queue.  They got there thru aging while
{ they were still a valid part of the stack.  For some reason they haven't been written out
{ yet (i.e. assign active set) and they were never referenced again.

    IF aste_p^.pages_in_memory > 1 THEN
      mmp$mm_free_pages (sva, 7fffffff(16), aste_p, FALSE, page_count_freed);
      fde_p^.eoi_byte_address := sva.offset; { End of information now ends at the stack page.
    IFEND;

{ For a mass storage file, return the disk file descriptor offset to job mode, release the file's
{ disk space, and change the file to a transient segment.  Otherwise report no descriptor offset.

    IF fde_p^.media = gfc$fm_mass_storage_file THEN
      request_block.disk_file_descriptor_offset := fde_p^.disk_file_descriptor_p;
      dmp$deallocate_file_space (fde_p, 0, amc$file_byte_limit);
      fde_p^.media := gfc$fm_transient_segment;
    ELSE
      request_block.disk_file_descriptor_offset := 0;
    IFEND;

  PROCEND mmp$mtr_lock_ring_1_stack;

?? TITLE := 'MMP$DETERMINE_ERROR_STATE' ??
?? EJECT ??

  PROCEDURE [XDCL] mmp$determine_error_state
    (    list_p: ^mmt$rma_list;
         list_length: mmt$rma_list_length;
     VAR io_error: boolean);

    VAR
      frame_p: ^mmt$page_frame_table_entry,
      frame_index: mmt$page_frame_index,
      i: mmt$rma_list_index;

{ Scan the rma list and report whether any page frame referenced by it
{ has a recorded io error (media error, unrecovered error, or error on
{ init).  io_error is set TRUE on the first such frame found; otherwise
{ it is FALSE.  A zero-length entry terminates the list early.

    io_error := FALSE;

  /scan_list/
    FOR i := 1 TO list_length DO
      IF list_p^ [i].length = 0 THEN
        RETURN; {----->
      IFEND;

{ Convert the real memory address to its page frame table entry and
{ inspect the io status recorded there.

      frame_index := list_p^ [i].rma DIV osv$page_size;
      frame_p := ^mmv$pft_p^ [frame_index];
      IF (frame_p^.io_error = ioc$media_error) OR
            (frame_p^.io_error = ioc$unrecovered_error) OR
            (frame_p^.io_error = ioc$error_on_init) THEN
        io_error := TRUE;
        RETURN; {----->
      IFEND;
    FOREND /scan_list/;

  PROCEND mmp$determine_error_state;
?? TITLE := 'MMP$QUICK_SWEEP' ??

?? EJECT ??
{------------------------------------------------------------------
{ This procedure is a special call to age the shared page queues.
{
{   MMP$QUICK_SWEEP
{
{-----------------------------------------------------------------------

  PROCEDURE [XDCL, #GATE] mmp$quick_sweep;

{ PURPOSE:
{   Age the shared page queues when the quick sweep interval has elapsed,
{   trim each shared queue down to its maximum size, replenish the free
{   page queues, and reclaim unused ast entries when the asynchronous
{   work flag requests it.  Finally reset the time at which CP Monitor
{   should next call this procedure.

    CONST
      c$relink_page_reject_count_max = 100; { This value is just an initial attempt

    VAR
      last_pfti: mmt$page_frame_index,
      mcount: integer,
      mcount1: integer,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      queue: mmt$page_frame_queue_id,
      rcount: integer,
      rcount1: integer,
      scount1: integer,
      relink_page_reject_count: integer,
      relink_status: mmt$relink_page_status,
      time_last_quick_sweep: [STATIC] integer := 200000;

{ Age the shared queues if necessary:

    IF ((#FREE_RUNNING_CLOCK (0) - time_last_quick_sweep) > mmv$quick_sweep_interval) THEN

      FOR queue := mmc$pq_shared_first TO mmv$last_active_shared_queue DO
        mmp$remove_stale_pages (mmv$gpql [queue].pqle, mmv$gpql [queue].age_interval_actual_modified,
              mmv$gpql [queue].age_interval_actual_unmodified, NIL, NIL, mmc$pq_avail_modified,
              mmv$gpql [queue].minimum_actual, mcount1, rcount1, scount1);

{ Trim the shared queue down to maximum size; page faulting for a shared page does not automatically trim the
{ shared queue.  A NIL ijl pointer is passed to the remove procedure; the ijl pointer is used only if the file
{ is a job file.  Pages in the shared queues cannot belong to a job file (the shared io error queue is an
{ exception).

        relink_page_reject_count := 0; { Reset the reject limit for each queue swept.
        pfti := mmv$gpql [queue].pqle.link.bkw;
        WHILE (mmv$gpql [queue].pqle.count > mmv$gpql [queue].maximum_actual) AND (pfti <> 0) AND
              (relink_page_reject_count <= c$relink_page_reject_count_max) DO
          pfte_p := ^mmv$pft_p^ [pfti];
          last_pfti := pfti;
          pfti := pfte_p^.link.bkw;

{ Remove one page; its per-call counts (mcount, rcount) are accumulated
{ into the per-queue totals (mcount1, rcount1) below.

          mmp$remove_page_from_jws (last_pfti, NIL {ijle_p} , FALSE {= relink when Avail Mod Q Max} , mcount,
                rcount, relink_status);
          IF relink_status = mmc$rps_avail_mod_queue_flooded THEN
            relink_page_reject_count := relink_page_reject_count + 1;
          IFEND;
          mcount1 := mcount1 + mcount;
          rcount1 := rcount1 + rcount;
        WHILEND;
        mmv$sq_mcount := mmv$sq_mcount + mcount1;
        mmv$sq_rcount := mmv$sq_rcount + rcount1;

        mmv$aging_statistics.age_shared_queue [queue].total_pages := mmv$aging_statistics.
              age_shared_queue [queue].total_pages + rcount1;
        mmv$aging_statistics.age_shared_queue [queue].modified_pages :=
              mmv$aging_statistics.age_shared_queue [queue].modified_pages + mcount1;
        mmv$aging_statistics.age_shared_queue [queue].pages_scanned := mmv$aging_statistics.
              age_shared_queue [queue].pages_scanned + scount1;
      FOREND;
      time_last_quick_sweep := #FREE_RUNNING_CLOCK (0);
    IFEND;

{ Call replenish free queue.
    mmp$replenish_free_queues (0);

{ Reclaim unused ast entries
    IF mmv$async_work.reclaim_astes THEN
      mmv$async_work.reclaim_astes := FALSE;
      mmp$reclaim_ast_entries (0);
    IFEND;

{ Reset the time that CP Monitor should next call this procedure.
    mmv$time_to_call_quick_sweep := mmv$quick_sweep_interval + #FREE_RUNNING_CLOCK (0);

  PROCEND mmp$quick_sweep;
?? OLDTITLE ??
MODEND mmm$monitor_request_processor;
