MODULE mmm$page_fault_processor;
?? RIGHT := 110 ??

{  PURPOSE: Memory_Manager
{     This module contains the monitor routines that are used to
{     manage physical memory and the page table.

?? PUSH (LISTEXT := ON) ??
?? NEWTITLE := 'Global Declarations Referenced by this Module', EJECT ??
*copyc mmc$first_transient_segment
*copyc mmc$manage_memory_utility
*copyc mmc$move_pages_max_req_length
*copyc mmc$shadow_allocation_size
*copyc mtc$job_fixed_segment
*copyc osc$processor_defined_registers
*copyc osc$purge_map_and_cache
*copyc tmc$signal_identifiers
*copyc mmd$segment_access_condition
*copyc dfe$error_condition_codes
*copyc ioe$st_errors
*copyc mme$condition_codes
*copyc dft$server_descriptor
*copyc dmt$mass_storage_error_codes
*copyc iot$io_error
*copyc jmt$delayed_swapin_work
*copyc jmt$ijl_swap_status
*copyc jmt$job_scheduler_event
*copyc mmt$active_segment_table
*copyc mmt$asid_list_page_table_full
*copyc mmt$assign_contig_passes
*copyc mmt$async_work_list
*copyc mmt$buffer_descriptor
*copyc mmt$continue_bit_count
*copyc mmt$i_aging_statistics
*copyc mmt$i_pf_statistics
*copyc mmt$int_segment_access_fault
*copyc mmt$io_identifier
*copyc mmt$keypoint_page_fault_status
*copyc mmt$make_pt_entry_status
*copyc mmt$manage_memory_utility
*copyc mmt$move_pages_page_count
*copyc mmt$page_frame_index
*copyc mmt$page_frame_queue_id
*copyc mmt$page_frame_table
*copyc mmt$page_pull_status
*copyc mmt$page_queue_list
*copyc mmt$page_streaming_statistics
*copyc mmt$paging_statistics
*copyc mmt$rb_advise
*copyc mmt$rb_assign_contig_memory
*copyc mmt$rb_assign_pages
*copyc mmt$rb_move_pages
*copyc mmt$reassignable_page_frames
*copyc mmt$relink_page_status
*copyc mmt$rma_list
*copyc mmt$segment_access_rights
*copyc mmt$segment_access_type
*copyc mmt$update_eoi_reason
*copyc ost$cpu_state_table
*copyc ost$heap
*copyc ost$keypoint_control
*copyc ost$page_size
*copyc ost$segment_access_control
*copyc sft$file_space_limit_kind
*copyc syt$monitor_flag
*copyc syt$perf_keypoints_enabled
*copyc tmt$fnx_search_type
?? POP ??

  PROCEDURE hide;
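{   The decks copied inside this never-called procedure declare XREF versions
{   of variables that this module defines with XDCL below; the procedure
{   wrapper presumably confines those XREF declarations to a local scope so
{   they do not collide with this module's XDCL definitions.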

*copyc mmv$a_divisor
*copyc mmv$a_mult
*copyc mmv$asid_asti_bits
*copyc mmv$ast_p
  PROCEND hide;
*copyc dfp$fetch_multi_page_status
*copyc dfp$fetch_page_status
*copyc dfp$file_server_allocation
*copyc dfp$get_served_file_desc_p
*copyc dfp$server_io
*copyc dmp$fetch_multi_page_status
*copyc dmp$fetch_page_status
*copyc dpp$display_error
*copyc gfp$mtr_get_sfid_from_fde_p
*copyc gfp$mtr_get_fde_p
*copyc gfp$mtr_get_locked_fde_p
*copyc gfp$mtr_unlock_fde_p
*copyc iop$enable_all_disk_units
*copyc iop$pager_io
*copyc jmf$ijle_p
*copyc jmp$check_scheduler_memory_wait
*copyc jmp$recognize_thrashing
*copyc jmp$set_scheduler_event
*copyc jmp$unlock_ajl
*copyc jsp$free_swapped_jobs_memory
*copyc mmf$include_pages_in_dump
*copyc mmp$assign_asid
*copyc mmp$asid
*copyc mmp$aste_pointer
*copyc mmp$check_queues
*copyc mmp$delete_pt_entry
*copyc mmp$fetch_pfti_array_size
*copyc mmp$find_next_pfti
*copyc mmp$free_asid
*copyc mmp$get_inhibit_io_status
*copyc mmp$initialize_find_next_pfti
*copyc mmp$make_pt_entry
*copyc mmp$get_sdt_entry_p
*copyc mmp$get_sdtx_entry_p
*copyc mmp$get_verify_asti_in_fde
*copyc mmp$page_pull_hash_sva
*copyc mmp$preset_real_memory
*copyc mmp$purge_all_cache_map
*copyc mmp$purge_all_cache_proc
*copyc mmp$purge_all_map_proc
*copyc mmp$remove_pages_working_set
*copyc mmp$reset_find_next_pfti
{      mmp$reset_store_next_pfti contains an inline proc named mmp$reset_store_pfti
*copyc mmp$reset_store_next_pfti
*copyc mmp$reset_store_pfti_reverse
{      mmp$store_next_pfti contains an inline proc named mmp$store_pfti
*copyc mmp$store_next_pfti
*copyc mmp$store_pfti_reverse
*copyc mmp$sva_purge_all_cache
*copyc mtf$cst_p
*copyc mtp$error_stop
*copyc mtp$set_status_abnormal
*copyc mtp$step_unstep_system
*copyc mtp$store_informative_message
*copyc osp$process_keypoint_page_fault
*copyc tmp$cause_task_switch
*copyc tmp$dequeue_task
*copyc tmp$find_next_xcb
*copyc tmp$get_top_of_stack
*copyc tmp$get_xcb_p
*copyc tmp$monitor_flag_job_tasks
*copyc tmp$queue_task
*copyc tmp$reissue_monitor_request
*copyc tmp$send_monitor_fault
*copyc tmp$set_monitor_flag
*copyc tmp$test_get_xcb_p
*copyc dfv$file_server_debug_enabled
*copyc gfv$null_sfid
*copyc jmv$ajl_p
*copyc jmv$ijl_p
*copyc jmv$null_ijl_ordinal
*copyc jmv$service_classes
*copyc jmv$system_ijl_ordinal
*copyc jsv$free_working_set_on_swapout
*copyc jsv$ijl_swap_queue_list
*copyc jsv$max_pages_first_swap_task
*copyc jsv$maximum_pages_to_swap
*copyc mmv$aging_algorithm
*copyc mmv$dynamic_avail_emergency_min
*copyc mmv$dynamic_available_floor
*copyc mmv$dynamic_available_max
*copyc mmv$dynamic_available_min
*copyc mmv$dynamic_neg_increment_max
*copyc mmv$dynamic_pos_increment_max
*copyc mmv$image_file
*copyc mmv$io_error_q_age_interval
*copyc mmv$jws_queue_age_interval
*copyc mmv$pages_to_dump_p
*copyc mmv$periodic_call_interval
*copyc mmv$pfti_array_p
*copyc mmv$preset_conversion_table
*copyc mmv$shared_queue_age_interval
*copyc mmv$time_to_call_mem_mgr
*copyc mtv$monitor_segment_table
*copyc mtv$sys_core_init_complete
*copyc osv$180_memory_limits
*copyc osv$time_to_check_asyn
*copyc tmv$null_global_task_id
*copyc tmv$ptl_p
?? NEWTITLE := 'Global Declarations Declared by this Module', EJECT ??

{ASID/ASTI Conversion Variables
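{ mmv$asid_asti_bits is the 4-bit bit-reversal table: entry i holds the value
{ of i with its four bits reversed.  Together with mmv$a_mult and mmv$a_divisor
{ it is presumably used to scatter ASTI values across the ASID space during
{ ASID/ASTI conversion.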

  VAR
    mmv$a_mult: [XDCL, #GATE] 0 .. 10000(16),
    mmv$a_divisor: [XDCL, #GATE] 0 .. 10000(16),
    mmv$asid_asti_bits: [XDCL, #GATE] array [0 .. 15] of 0 .. 15 :=
          [0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15];

  VAR
    mmv$wait_on_avail_mod_q_full: [XDCL, #GATE] boolean := TRUE;

{Define AST template for job swapper to use to create a new entry for a Job Fixed segment.

  VAR
    mmv$initial_job_fixed_ast_entry: [XDCL, READ] mmt$active_segment_table_entry :=
          [[0, 0], 0, [0, 0], TRUE, mmc$pq_job_fixed, * , TRUE];

{ Define array for keeping statistics on status values returned from MMP$WRITE_PAGE_TO_DISK.

  VAR
    mmv$write_page_statistics: [XDCL, #GATE] array [mmt$write_page_to_disk_status] of integer :=
          [0, 0, 0, 0, 0, 0, 0];

  VAR
    mmv$reject_availmod_relink_stat: [XDCL, #GATE] array [1 .. 6] of record
      relinked: integer,
      qflooded: integer,
      rejected: integer,
    recend := [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]];

{  The following variables and the Global Page Queue List below are all managed by the Manage Memory Utility.
{      mmv$age_interval_ceiling         - mmc$mmu_ma_aic
{      mmv$age_interval_floor           - mmc$mmu_ma_aif
{      mmv$aggressive_aging_level       - mmc$mmu_ma_aal
{      mmv$aggressive_aging_level_2     - mmc$mmu_ma_aal2
{      mmv$aging_algorithm              - mmc$mmu_ma_aa
{      mmv$avail_modified_queue_max     - mmc$mmu_ma_amqmax
{      mmv$write_aged_out_pages         - mmc$mmu_ma_amqmin
{      mmv$avail_mod_q_floor_min        - mmc$mmu_ma_amqfm
{      mmv$avail_mod_q_decr_per_task    - mmc$mmu_ma_amqdpt
{      mmv$avail_mod_waitq_dequeue_int  - mmc$mmu_ma_amwqdi
{      mmv$mem_wait_q_dequeue_interval  - mmc$mmu_ma_mwqdi
{      mmv$io_error_q_age_interval      - mmc$mmu_ma_ieqai
{      mmv$jws_queue_age_interval       - mmc$mmu_ma_jwsai
{      mmv$min_avail_pages              - mmc$mmu_ma_minap
{      mmv$page_streaming_random_limit  - mmc$mmu_ma_psrl
{      mmv$page_streaming_reads         - mmc$mmu_ma_psr
{      mmv$page_streaming_prestream     - mmc$mmu_ma_psp
{      mmv$page_streaming_threshold     - mmc$mmu_ma_pst
{      mmv$page_streaming_transfer      - mmc$mmu_ma_psts
{      mmv$periodic_call_interval       - mmc$mmu_ma_pci
{      mmv$shared_queue_age_interval    - mmc$mmu_ma_swsai
{      mmv$swapping_aic_modified        - mmc$mmu_ma_sam
{      mmv$swapping_aic_unmodified      - mmc$mmu_ma_sau
{      mmv$tick_time                    - mmc$mmu_ma_tt
{      mmv$dynamic_available_floor      - mmc$mmu_ma_daf
{      mmv$dynamic_avail_emergency_min  - mmc$mmu_ma_daem
{      mmv$dynamic_available_max        - mmc$mmu_ma_damax
{      mmv$dynamic_available_min        - mmc$mmu_ma_damin
{      mmv$dynamic_neg_increment_max    - mmc$mmu_ma_dnim
{      mmv$dynamic_pos_increment_max    - mmc$mmu_ma_dpim
{
{  Define array for the Global Page Queue List and initialize it to the default values as defined in the
{  common deck mmc$manage_memory_utility.
{

  VAR
    mmv$gpql: [XDCL, #GATE] mmt$global_page_queue_list := [
{free              } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   minimum          } 0, 0, 0, 0, 0, 0, 0, 0,
{   maximum          } 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{available         } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   minimum          } 0, 0, 0, 0, 0, 0, 0, 0,
{   maximum          } 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{available modified} [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   minimum          } 0, 0, 0, 0, 0, 0, 0, 0,
{   maximum          } 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{wired             } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   minimum          } 0, 0, 0, 0, 0, 0, 0, 0,
{   maximum          } 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{task_service      } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_task_service, mmc$mmu_queue_age_task_service,
{                    } mmc$mmu_queue_age_task_service, mmc$mmu_queue_age_task_service,
{                    } mmc$mmu_queue_age_task_service, mmc$mmu_queue_age_task_service,
{                    } mmc$mmu_queue_age_task_service, mmc$mmu_queue_age_task_service,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{pf_execute        } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_pf_execute, mmc$mmu_queue_age_pf_execute,
{                    } mmc$mmu_queue_age_pf_execute, mmc$mmu_queue_age_pf_execute,
{                    } mmc$mmu_queue_age_pf_execute, mmc$mmu_queue_age_pf_execute,
{                    } mmc$mmu_queue_age_pf_execute, mmc$mmu_queue_age_pf_execute,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{pf_non_execute    } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_pf_non_exec, mmc$mmu_queue_age_pf_non_exec,
{                    } mmc$mmu_queue_age_pf_non_exec, mmc$mmu_queue_age_pf_non_exec,
{                    } mmc$mmu_queue_age_pf_non_exec, mmc$mmu_queue_age_pf_non_exec,
{                    } mmc$mmu_queue_age_pf_non_exec, mmc$mmu_queue_age_pf_non_exec,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{device_file       } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_device_file, mmc$mmu_queue_age_device_file,
{                    } mmc$mmu_queue_age_device_file, mmc$mmu_queue_age_device_file,
{                    } mmc$mmu_queue_age_device_file, mmc$mmu_queue_age_device_file,
{                    } mmc$mmu_queue_age_device_file, mmc$mmu_queue_age_device_file,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{file_server       } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_file_server, mmc$mmu_queue_age_file_server,
{                    } mmc$mmu_queue_age_file_server, mmc$mmu_queue_age_file_server,
{                    } mmc$mmu_queue_age_file_server, mmc$mmu_queue_age_file_server,
{                    } mmc$mmu_queue_age_file_server, mmc$mmu_queue_age_file_server,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{other             } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_other, mmc$mmu_queue_age_other,
{                    } mmc$mmu_queue_age_other, mmc$mmu_queue_age_other,
{                    } mmc$mmu_queue_age_other, mmc$mmu_queue_age_other,
{                    } mmc$mmu_queue_age_other, mmc$mmu_queue_age_other,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_01          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_02          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_03          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_04          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_05          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_06          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_07          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_08          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_09          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_10          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_11          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_12          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_13          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_14          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_15          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_16          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_17          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_18          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_19          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_20          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_21          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_22          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_23          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_24          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{ site_25          } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } mmc$mmu_queue_age_site_queues, mmc$mmu_queue_age_site_queues,
{                    } 0, 0, 0, 0,
{   minimum          } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } mmc$mmu_queue_minimum, mmc$mmu_queue_minimum,
{                    } 0, 0, 0, 0,
{   maximum          } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } mmc$mmu_queue_maximum, mmc$mmu_queue_maximum,
{                    } 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{shared_io_error   } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
{   minimum          } 0, 0, 0, 0, 0, 0, 0, 0,
{   maximum          } 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{swapped_io_error  } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
{   minimum          } 0, 0, 0, 0, 0, 0, 0, 0,
{   maximum          } 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []],
{flawed            } [[[0, 0], 0],
{   Q age, encre     } 0, 0, 0,
{   age_interval     } 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   minimum          } 0, 0, 0, 0, 0, 0, 0, 0,
{   maximum          } 0, 0, 0, 0, 0, 0, 0, 0, 0,
{   cycles           } - $mmt$page_queue_age_cycles []]];

?? FMT (FORMAT := OFF) ??
  VAR
    mmv$manage_memory_utility: [XDCL, #GATE] mmt$manage_memory_utility:=  [
 [ [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []],
   [[[0,0],0],0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-$mmt$page_queue_age_cycles []]
  ],

{ Note: The following entries must be in the same order as defined in the ordinal mmt$mmu_memory_attributes

     [  [ 0, mmc$mmu_mvt_byte,    ^mmv$age_interval_ceiling],                  {mmc$mmu_ma_aic
        [ 0, mmc$mmu_mvt_byte,    ^mmv$age_interval_floor],                    {mmc$mmu_ma_aif
        [ 0, mmc$mmu_mvt_integer, ^mmv$aggressive_aging_level],                {mmc$mmu_ma_aal
        [ 0, mmc$mmu_mvt_integer, ^mmv$aggressive_aging_level_2],              {mmc$mmu_ma_aal2
        [ 0, mmc$mmu_mvt_integer, ^mmv$aging_algorithm],                       {mmc$mmu_ma_aa
        [ 0, mmc$mmu_mvt_integer, ^mmv$avail_modified_queue_max],              {mmc$mmu_ma_amqmax
        [ 0, mmc$mmu_mvt_integer, ^mmv$write_aged_out_pages],                  {mmc$mmu_ma_amqmin
        [ 0, mmc$mmu_mvt_integer, ^mmv$avail_mod_q_floor_min],                 {mmc$mmu_ma_amqfm
        [ 0, mmc$mmu_mvt_integer, ^mmv$avail_mod_q_decr_per_task],             {mmc$mmu_ma_amqdpt
        [ 0, mmc$mmu_mvt_integer, ^mmv$avail_mod_waitq_dequeue_int],           {mmc$mmu_ma_amwqdi
        [ 0, mmc$mmu_mvt_integer, ^mmv$mem_wait_q_dequeue_interval],           {mmc$mmu_ma_mwqdi
        [ 0, mmc$mmu_mvt_integer, ^mmv$io_error_q_age_interval],               {mmc$mmu_ma_ieqai
        [ 0, mmc$mmu_mvt_integer, ^mmv$jws_queue_age_interval],                {mmc$mmu_ma_jwsai
        [ 0, mmc$mmu_mvt_integer, ^mmv$min_avail_pages],                       {mmc$mmu_ma_minap
        [ 0, mmc$mmu_mvt_byte,    ^mmv$page_streaming_random_limit],           {mmc$mmu_ma_psrl
        [ 0, mmc$mmu_mvt_byte,    ^mmv$page_streaming_reads],                  {mmc$mmu_ma_psr
        [ 0, mmc$mmu_mvt_byte,    ^mmv$page_streaming_prestream],              {mmc$mmu_ma_psp
        [ 0, mmc$mmu_mvt_integer, ^mmv$page_streaming_threshold],              {mmc$mmu_ma_pst
        [ 0, mmc$mmu_mvt_integer, ^mmv$page_streaming_transfer],               {mmc$mmu_ma_psts
        [ 0, mmc$mmu_mvt_integer, ^mmv$periodic_call_interval],                {mmc$mmu_ma_pci
        [ 0, mmc$mmu_mvt_integer, ^mmv$shared_queue_age_interval],             {mmc$mmu_ma_swsai
        [ 0, mmc$mmu_mvt_integer, ^mmv$swapping_aic_modified],                 {mmc$mmu_ma_sam
        [ 0, mmc$mmu_mvt_integer, ^mmv$swapping_aic_unmodified],               {mmc$mmu_ma_sau
        [ 0, mmc$mmu_mvt_integer, ^mmv$tick_time],                             {mmc$mmu_ma_tt
        [ 0, mmc$mmu_mvt_integer, ^mmv$dynamic_available_floor],               {mmc$mmu_ma_daf
        [ 0, mmc$mmu_mvt_integer, ^mmv$dynamic_avail_emergency_min],           {mmc$mmu_ma_daem
        [ 0, mmc$mmu_mvt_integer, ^mmv$dynamic_available_max],                 {mmc$mmu_ma_damax
        [ 0, mmc$mmu_mvt_integer, ^mmv$dynamic_available_min],                 {mmc$mmu_ma_damin
        [ 0, mmc$mmu_mvt_integer, ^mmv$dynamic_neg_increment_max],             {mmc$mmu_ma_dnim
        [ 0, mmc$mmu_mvt_integer, ^mmv$dynamic_pos_increment_max] ]];          {mmc$mmu_ma_dpim
?? FMT (FORMAT := ON) ??
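
{ A minimal sketch (not a routine in this module) of how the attribute table
{ just initialized might be consumed: each entry pairs a value-type code with
{ a pointer to the tunable variable, so one generic routine can update any of
{ them.  The field names ATTRIBUTES, VALUE_TYPE, and VARIABLE_P below are
{ hypothetical stand-ins for the real names in mmt$manage_memory_utility.
{
{   PROCEDURE [INLINE] apply_integer_attribute
{     (    attr: mmt$mmu_memory_attributes;
{          new_value: integer);
{
{     VAR
{       int_p: ^integer;
{
{     IF mmv$manage_memory_utility.attributes [attr].value_type = mmc$mmu_mvt_integer THEN
{       int_p := mmv$manage_memory_utility.attributes [attr].variable_p;
{       int_p^ := new_value;
{     IFEND;
{
{   PROCEND apply_integer_attribute;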

  VAR
    null_pva: 0 .. 0ffffffffffff(16),
    null_sva: [STATIC] ost$system_virtual_address := [0, 0],
    total_contig_pages_assigned: integer := 0,

    mmv$advise_in_aio_limit: [XDCL, #GATE] integer := 24,
    mmv$age_interval_ceiling: [XDCL, #GATE] 0 .. 255 := mmc$mmu_age_interval_ceiling,
    mmv$age_interval_floor: [XDCL, #GATE] 0 .. 255 := mmc$mmu_age_interval_floor,
    mmv$aggressive_aging_level: [XDCL, #GATE] integer := mmc$mmu_aggressive_aging_one,
    mmv$aggressive_aging_level_2: [XDCL, #GATE] integer := mmc$mmu_aggressive_aging_two,
    mmv$aging_statistics: [XDCL, #GATE] mmt$i_aging_statistics :=
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, [REP 31 of [0, 0, 0]], 0, 0, 0, 0, 0],
    mmv$aio_limit_count: [XDCL, #GATE] integer := 0,
    mmv$assign_contiguous_pass_cnt: [XDCL, #GATE] mmt$assign_contig_passes := [0, 0, 0],
    mmv$assign_contig_reject: [XDCL, #GATE] integer := 0,
    mmv$assign_multiple_pages: [XDCL, #GATE] integer := 50,
    mmv$assign_pages_purge_count: integer := 0,
    mmv$ast_p: [XDCL, #GATE] ^mmt$active_segment_table := NIL,
    mmv$async_work: [XDCL] mmt$async_work_list := [FALSE, FALSE, [0, 0], NIL],
    mmv$avail_mod_q_decr_per_task: [XDCL, #GATE] integer := mmc$mmu_avail_mod_q_dec_task,
    mmv$avail_mod_q_floor_min: [XDCL, #GATE] integer := mmc$mmu_avail_mod_q_floor_min,
    mmv$avail_mod_wait_queue: [XDCL] tmt$task_queue_link := [0, 0],
    mmv$avail_mod_waitq_dequeue_int: [XDCL, #GATE] integer := mmc$mmu_avail_mod_waitq_deq_int,
    mmv$avail_modified_queue_max: [XDCL, #GATE] integer := osc$max_page_frames,
    mmv$honor_avail_mod_q_max_ds: [XDCL, #GATE] boolean := FALSE,
    mmv$contiguous_mem_length_max: [XDCL, #GATE] ost$segment_length := 65536,
    mmv$dm_flag_on_write: [XDCL] 0 .. 0ffffffff(16) := 1,
    mmv$file_allocation_interval: [XDCL, #GATE] integer := 20000,
    mmv$jmtr_escaped_allocate: [XDCL, #GATE] integer := 0,
    mmv$last_active_shared_queue: [XDCL, #GATE] mmt$global_page_queue_index := mmc$pq_shared_site_05,
    mmv$last_segment_accessed: [XDCL, #GATE] ost$segment,
    mmv$lost_escaped_allocate: [XDCL, #GATE] integer := 0,
    mmv$max_pages_no_file: [XDCL, #GATE] integer := 15, {This value is forced negative during deadstart
    { to disable transient segments until Space Mgr runs.
    mmv$max_working_set_size: [XDCL, #GATE] integer := 1000,
    mmv$maxws_aio_count: [XDCL, #GATE] integer := 0,
    mmv$maxws_aio_slowdown: [XDCL] integer := 60000000,
    mmv$maxws_aio_threshold: [XDCL, #GATE] integer := 10,
    mmv$maximum_write_span: [XDCL, #GATE] integer := 4000000(16),
    mmv$memory_wait_queue: [XDCL] tmt$task_queue_link := [0, 0],
    mmv$mem_wait_q_dequeue_interval: [XDCL, #GATE] integer := mmc$mmu_mem_waitd_waitq_deq_int,
    mmv$min_avail_pages: [XDCL, #GATE] integer := mmc$mmu_min_avail_pages,
    mmv$multiple_caches: [XDCL, #GATE] boolean := FALSE,
    mmv$multiple_page_maps: [XDCL, #GATE] boolean := FALSE,
    mmv$multi_page_write: [XDCL, #GATE] boolean := TRUE,
    mmv$no_memory_buffering: [XDCL, #GATE] boolean := FALSE,
    mmv$pages_per_new_page_fault: [XDCL, #GATE] 1 .. 8 := 1,
    mmv$page_skip_count: [XDCL, #GATE] integer := 16,
    mmv$page_streaming_prestream: [XDCL, #GATE] 0 .. 255 := mmc$mmu_ps_prestream,
    mmv$page_streaming_threshold: [XDCL, #GATE] integer := mmc$mmu_ps_threshold,
    mmv$page_streaming_transfer: [XDCL, #GATE] integer := mmc$mmu_ps_transfer_size,
    mmv$page_streaming_reads: [XDCL, #GATE] 0 .. 255 := mmc$mmu_ps_reads,
    mmv$page_streaming_random_limit: [XDCL, #GATE] 0 .. 255 := mmc$mmu_ps_random_limit,
    mmv$pages_for_overallocation: [XDCL, #GATE] integer := 16,
    mmv$paging_statistics: [XDCL, #GATE] mmt$paging_statistics :=
          [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]],
    mmv$pf_statistics: [XDCL, #GATE] mmt$i_pf_statistics := [REP 20 of 0],
    mmv$pf_sva_array: [XDCL] record
      next_i: integer,
      pf_recs: array [0 .. num_pf_recs - 1] of packed record
        pstatus_time: 0 .. 0ffff(16),
        sva: ost$system_virtual_address,
      recend,
    recend,
    mmv$pft_p: [XDCL, #GATE] ^mmt$page_frame_table := NIL,
    mmv$post_deadstart: [XDCL, #GATE] boolean := FALSE,
    mmv$pt_length: [XDCL, #GATE] integer,
    mmv$pt_p: [XDCL, #GATE] ^ost$page_table,
    mmv$reassignable_page_frames: [XDCL, #GATE] mmt$reassignable_page_frames,
    mmv$read_tu_execute: [XDCL, #GATE] 0 .. 0ffffffff(16) := 1,
    mmv$read_tu_read_write: [XDCL, #GATE] 0 .. 0ffffffff(16) := 1,
    mmv$refs_to_unrec_df_file_inhib: [XDCL, #GATE] integer := 0,
    mmv$refs_to_unrec_df_file_term: [XDCL, #GATE] integer := 0,
    mmv$reserved_page_count: [XDCL, #GATE] integer := 0,


{  Define the number of free and available pages that job scheduler tries to keep
{  available for all active jobs.

    mmv$resident_job_target: [XDCL, #GATE] integer := 60,

{! Define the number of pages that memory manager and job swapper will try to
{  keep available in 'now' + 'soon' reassignable memory.  If 'now' + 'soon'
{  is less than or equal to this value IO is initiated on jobs in the long
{  wait queue until this value is exceeded.

    mmv$sdtx_entry_size: [XDCL, #GATE] integer := #SIZE (mmt$segment_descriptor_extended),
    mmv$shared_pages_in_jws: [XDCL, #GATE] boolean := TRUE,
    mmv$swapping_aic_modified: [XDCL, #GATE] integer := mmc$mmu_swapping_aic_modified,
    mmv$swapping_aic_unmodified: [XDCL, #GATE] integer := mmc$mmu_swapping_aic_unmodified,
    mmv$tables_initialized: [XDCL, #GATE] boolean := FALSE,
*if $true(mmc$debug)
    mmv$test_reassign_asid: [XDCL] boolean := FALSE,
*ifend
    mmv$tick_time: [XDCL, #GATE] integer := mmc$mmu_tick_time,
    mmv$trap_page_fault: [XDCL, #GATE] boolean := FALSE,
    mmv$volume_wait_queue: [XDCL] tmt$task_queue_link := [0, 0],
    mmv$write_aged_out_pages: [XDCL, #GATE] integer := osc$max_page_frames,
    osv$page_size: [XDCL, #GATE] ost$page_size,
    syv$perf_keypoints_enabled: [XDCL, #GATE] syt$perf_keypoints_enabled :=
          [FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE],
    syv$recovering_job_count: [XDCL, #GATE] integer := 0,
    syv$refs_to_unrecovered_seg: [XDCL] integer := 0;


  CONST
    num_pf_recs = 256;

?? TITLE := 'MMP$PURGE_ALL_CACHE, MMP$PURGE_ALL_MAP - CACHE/MAP MANAGEMENT', EJECT ??
*copyc mmp$aste_pointer_from_pfti
*copyc mmp$purge_all_page_map
*copyc mmp$sva_purge_one_page_map
*copyc mmp$sva_purge_all_page_map
?? TITLE := 'mmp$update_eoi', EJECT ??

{
{ This procedure is called to update EOI (if necessary) after adding a page to a segment
{ or writing a page to disk.
{ If the beginning of the new page is at or beyond the current FDE EOI, then
{ the file EOI is set to the beginning of the next page.
{ NOTE:  This procedure does NOT set EOI back.  Any process which is going to shorten the
{ file must explicitly change EOI itself.
{ NOTE:  Currently there are no callers of this procedure with REASON = MMC$UER_EXACT_EOI.
{ If a caller uses that reason in the future, this procedure will need to be carefully
{ examined and changed.
{ NOTE:  All processes which change eoi_state to mmc$eoi_actual MUST free all pages beyond
{ the page containing EOI.  (Currently mmp$mtr_set_get_segment_length is the only process
{ which sets state to actual.)  Failure to do so will result in EOI being messed up by this
{ procedure if a page beyond the actual EOI gets written out.
{
{      OFFSET - must be the FIRST byte of the page assigned/written. If multiple pages
{               are assigned, offset should be the beginning of the page faulted for.
{


  PROCEDURE [INLINE] mmp$update_eoi
    (    fde_p: gft$locked_file_desc_entry_p;
         offset: ost$segment_offset;
         reason: mmt$update_eoi_reason);

    IF offset >= fde_p^.eoi_byte_address THEN
      fde_p^.eoi_byte_address := offset + osv$page_size;
      fde_p^.flags.eoi_modified := TRUE;
      IF reason = mmc$uer_multiple_pages_assigned THEN
        fde_p^.eoi_state := mmc$eoi_uncertain;
      ELSEIF reason = mmc$uer_page_assigned THEN
        fde_p^.eoi_state := mmc$eoi_rounded;

{     ELSE reason = mmc$uer_page_written (mmc$uer_exact_eoi is not passed to this procedure)
{       The only way a page being written could have an offset greater than EOI is if the state is uncertain.
{       Additional pages were assigned on a page fault; eoi_byte_address was set to the page faulted for.
{       Now one of those additional pages is being written.  The eoi_byte_address has been updated (a few
{       lines above) but the state MUST remain uncertain, because there may still be other additional pages
{       assigned that are beyond EOI.
{       If the state were actual and the page was being written, the state could not be changed either, nor
{       should the eoi_byte_address be changed.  Because set_segment_length is the only process that sets
{       state to actual and it FREES all pages beyond the page containing EOI, there cannot be a page being
{       written whose offset is greater than the eoi_byte_address.

      IFEND;
    ELSEIF reason = mmc$uer_page_written THEN

{ This code fixes EOI for files whose EOI was set during a previous deadstart that used a different page
{ size.  Running previously with a smaller page size could have left the eoi_byte_address set to what is
{ now the middle of a page at the current page size.  NOTE however:  The EOI cannot be changed if the
{ state is actual; the user set the EOI and it must be left alone.

      IF (offset + osv$page_size) > fde_p^.eoi_byte_address THEN
        IF fde_p^.eoi_state <> mmc$eoi_actual THEN
          fde_p^.flags.eoi_modified := TRUE;
          fde_p^.eoi_byte_address := offset + osv$page_size;
          fde_p^.eoi_state := mmc$eoi_rounded;
        IFEND;
      IFEND;
    IFEND;

  PROCEND mmp$update_eoi;
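
{ Illustrative (hypothetical) call site: a caller that has just assigned one
{ page at page-aligned OFFSET, with the FDE already locked, would round EOI up
{ with
{
{   mmp$update_eoi (fde_p, offset, mmc$uer_page_assigned);
{
{ leaving eoi_byte_address at offset + osv$page_size and eoi_state set to
{ mmc$eoi_rounded whenever offset was at or beyond the previous EOI.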

?? TITLE := 'MMP$DETERMINE_SHARED_QUEUE_ID', EJECT ??

{ This function determines the shared queue id for a file whose pages are going
{ to be kept in one of the shared queues.

  FUNCTION [XDCL, INLINE] mmp$determine_shared_queue_id
    (    fde_p: gft$locked_file_desc_entry_p;
         ste_p: ^mmt$segment_descriptor): mmt$page_frame_queue_id;

    IF fde_p^.file_kind = gfc$fk_job_permanent_file THEN
      IF (fde_p^.queue_ordinal <> 0) AND (fde_p^.queue_ordinal <= mmv$last_active_shared_queue) THEN
        mmp$determine_shared_queue_id := fde_p^.queue_ordinal;
      ELSEIF fde_p^.media = gfc$fm_served_file THEN
        mmp$determine_shared_queue_id := mmc$pq_shared_file_server;
      ELSEIF (ste_p^.ste.xp = osc$non_executable) OR (ste_p^.ste.wp <> osc$non_writable) THEN
        mmp$determine_shared_queue_id := mmc$pq_shared_pf_non_execute;
      ELSE
        mmp$determine_shared_queue_id := mmc$pq_shared_pf_execute;
      IFEND;
    ELSEIF fde_p^.file_kind = gfc$fk_catalog THEN
      mmp$determine_shared_queue_id := mmc$pq_shared_pf_non_execute;
    ELSEIF fde_p^.flags.global_template_file THEN
      mmp$determine_shared_queue_id := mmc$pq_shared_task_service;
    ELSEIF fde_p^.file_kind = gfc$fk_device_file THEN
      mmp$determine_shared_queue_id := mmc$pq_shared_device_file;
    ELSE
      mmp$determine_shared_queue_id := mmc$pq_shared_other;
    IFEND;

  FUNCEND mmp$determine_shared_queue_id;
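
{ For example: a job-permanent file already bound to an active site queue stays
{ in that queue; a served job-permanent file maps to mmc$pq_shared_file_server;
{ catalogs map to mmc$pq_shared_pf_non_execute; global template files to
{ mmc$pq_shared_task_service; device files to mmc$pq_shared_device_file.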
?? TITLE := 'INITIALIZE_NEW_AST_ENTRY', EJECT ??

{--------------------------------------------------------------------------------------------
{This routine is called to assign and initialize a new AST entry for a segment.
{
{-------------------------------------------------------------------------------------------

  PROCEDURE [INLINE] initialize_new_ast_entry
    (    fde_p: gft$locked_file_desc_entry_p;
         segnum: ost$segment;
         ste_p: ^mmt$segment_descriptor;
         stxe_p: ^mmt$segment_descriptor_extended;
         cst_p: ^ost$cpu_state_table;
         force_to_global: boolean;
     VAR asid: ost$asid;
     VAR aste_p: ^mmt$active_segment_table_entry);

    VAR
      asti: mmt$ast_index,
      queue_id: mmt$page_frame_queue_id;

    mmp$assign_asid (asid, asti, aste_p);

    fde_p^.asti := asti;

    IF fde_p^.stack_for_ring = 0 THEN
      fde_p^.last_segment_number := segnum;
      fde_p^.global_task_id := cst_p^.taskid;
    IFEND;

    aste_p^.ijl_ordinal := cst_p^.ijl_ordinal;

    IF mmc$sa_wired IN stxe_p^.software_attribute_set THEN
      queue_id := mmc$pq_wired;
    ELSEIF mmc$sa_fixed IN stxe_p^.software_attribute_set THEN
      queue_id := mmc$pq_job_fixed;
    ELSEIF force_to_global OR (stxe_p^.access_state = mmc$sas_terminate_access) THEN
      aste_p^.ijl_ordinal := jmv$system_ijl_ordinal;
      queue_id := mmp$determine_shared_queue_id (fde_p, ste_p);
    ELSE
      queue_id := mmc$pq_job_working_set;
    IFEND;

    aste_p^.queue_id := queue_id;
    aste_p^.sfid := stxe_p^.sfid;

    aste_p^.include_pages_in_dump := mmf$include_pages_in_dump (segnum, fde_p, ste_p);

    ste_p^.ste.asid := asid;
    ste_p^.asti := asti;

  PROCEND initialize_new_ast_entry;

?? TITLE := '[inline] OK_TO_RELINK_TO_AVAIL_MODIFIED', EJECT ??

{
{ This function determines whether or not pages can be relinked to the
{ available modified queue.  Pages cannot be relinked to the available
{ modified queue if the number of pages in the available modified queue
{ exceeds the system attribute AVAIL_MODIFIED_QUEUE_MAX.  The size of the
{ available modified queue is limited because system performance degrades
{ severely when the number of pages in the free and available queues goes
{ too low as a result of flooding of the available modified queue.
{
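
{ Illustrative evaluation (assumed values): with mmv$avail_modified_queue_max = 4096 and a current queue
{ count of 5000, the function returns FALSE once deadstart is complete; while mmv$post_deadstart is
{ FALSE, the limit is waived unless mmv$honor_avail_mod_q_max_ds is set.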

  FUNCTION [INLINE] ok_to_relink_to_avail_modified: boolean;

    ok_to_relink_to_avail_modified :=
{           } (mmv$gpql [mmc$pq_avail_modified].pqle.count <= mmv$avail_modified_queue_max)
{        } OR ((NOT mmv$post_deadstart) AND (mmv$honor_avail_mod_q_max_ds = FALSE));

  FUNCEND ok_to_relink_to_avail_modified;

?? TITLE := 'SET_ASSIGN_ACTIVE', EJECT ??

{--------------------------------------------------------------------------------------------
{This routine is used to set the SDTX flags for a request that requires job mode work.
{      SDTX.ASSIGN_ACTIVE  has the following values
{            mmc$assign_active_null     - Null value
{            mmc$assign_active_escaped  - Implies escaped allocation
{             otherwise                 - Address that requires job mode work
{-------------------------------------------------------------------------------------------
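
{ Illustrative sequence (a sketch with an assumed offset): a first request needing job mode work at
{ offset 20000(16) records that address (assign_active := 20000(16)); any further request arriving
{ before the job mode work clears the field collapses it to mmc$assign_active_escaped, since one
{ address can no longer describe all of the outstanding work.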

  PROCEDURE [INLINE] set_assign_active
    (    stxe_p: ^mmt$segment_descriptor_extended;
         offset: ost$segment_offset);

    IF stxe_p^.assign_active = mmc$assign_active_null THEN
      stxe_p^.assign_active := offset;
    ELSE
      stxe_p^.assign_active := mmc$assign_active_escaped;
    IFEND;

  PROCEND set_assign_active;
?? TITLE := 'MMP$CONVERT_PVA - Convert job mode PVA to SVA' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{Name:
{  mmp$convert_pva
{Purpose:
{  This routine converts a PVA relative to the CURRENT USER TASK
{  to an SVA and returns pointers to the SDTX, AST entries for the segment.
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$convert_pva
    (    p: ^cell;
         cst_p: ^ost$cpu_state_table;
     VAR xsva: ost$system_virtual_address;
     VAR fde_p: gft$locked_file_desc_entry_p;
     VAR aste_p: ^mmt$active_segment_table_entry;
     VAR ste_p: ^mmt$segment_descriptor;
     VAR stxe_p: ^mmt$segment_descriptor_extended);


    VAR
      asid: ost$asid,
      asti: mmt$ast_index,
      segnum: ost$segment,
      sva: ost$system_virtual_address, {use local var for performance
      force_to_global: boolean; {TRUE if determined page goes in global queue.}

    segnum := #SEGMENT (p);

    ste_p := mmp$get_sdt_entry_p (cst_p^.xcb_p, segnum);
    stxe_p := mmp$get_sdtx_entry_p (cst_p^.xcb_p, segnum);

    IF (segnum > cst_p^.xcb_p^.xp.segment_table_length) OR (ste_p^.ste.vl = osc$vl_invalid_entry) THEN
      mtp$error_stop ('MM - invalid PVA');
    IFEND;

*if $true(mmc$debug)
{ The following trap code is used periodically to trap escaped allocation. Please do not delete this code.

    IF (stxe_p^.assign_active <> mmc$assign_active_null) AND (cst_p^.xcb_p^.xp.p_register.pva.ring > 1) AND
          (cst_p^.xcb_p^.xp.trap_enable = osc$traps_enabled) AND NOT cst_p^.xcb_p^.stlc_allocation AND
          (osc$page_fault IN cst_p^.xcb_p^.xp.monitor_condition_register) AND
          NOT (osc$trap_exception IN cst_p^.xcb_p^.xp.monitor_condition_register) THEN
      mtp$error_stop ('PFP-CONVERT--Escaped Allocation.');
    IFEND;
*ifend

{! Does this really happen}
    IF stxe_p^.sfid.residence = gfc$tr_system_wait_recovery THEN
      syv$refs_to_unrecovered_seg := syv$refs_to_unrecovered_seg + 1;
      aste_p := NIL;
      RETURN; {----->
    IFEND;

    sva.asid := ste_p^.ste.asid;
    sva.offset := #OFFSET (p);

    gfp$mtr_get_locked_fde_p (stxe_p^.sfid, cst_p^.ijle_p, fde_p);

    IF sva.asid <> 0 THEN
      aste_p := ^mmv$ast_p^ [fde_p^.asti];
*if $true(mmc$debug)
      IF ((fde_p^.asti <> ste_p^.asti) OR (aste_p^.sfid <> stxe_p^.sfid)) THEN
        mtp$error_stop ('MM - bad tables in CONVERT_PVA');
      IFEND;
*ifend
    ELSE
      #PURGE_BUFFER (osc$purge_all_page_seg_map, null_pva); { only job mode segment map purge is required}
      force_to_global := (fde_p^.queue_status = gfc$qs_global_shared) OR (fde_p^.attach_count > 1);

      mmp$get_verify_asti_in_fde (fde_p, stxe_p^.sfid, cst_p^.ijl_ordinal, asti);

      IF (asti = 0) THEN
        initialize_new_ast_entry (fde_p, segnum, ste_p, stxe_p, cst_p, force_to_global, asid, aste_p);
      ELSE
        mmp$asid (asti, asid);
        ste_p^.ste.asid := asid;
        ste_p^.asti := asti;

{ Determine which queue (JWS or Shared) this file should be in by looking at FORCE_TO_GLOBAL, but
{ with two important exceptions.  (1) If there are currently pages in memory and the current queue is not JWS,
{ then the file was recently global and there are still pages in memory--maybe in the global queue, maybe
{ in the available queue.  In this case we must leave the pages in the global queue, or we may end up with
{ modified pages in both the JWS and global queues, which is a problem when it is time to write modified pages.
{ If the file belongs in a JWS queue but has pages in memory, its queue must not be changed.
{ Also, if it is currently in the JWS queue, the ijl_ordinal must be reset because it may point
{ to an old ijl.  (2) If this file is a shared file server file that has been terminated, the
{ fde_p^.attach_count has been decremented and no longer reflects the actual attach count.  Also the
{ ste.asid has been zeroed and there will be no pages in memory.  If pages are in the shared file queue,
{ leave the aste alone.

        aste_p := ^mmv$ast_p^ [asti];
        IF NOT force_to_global AND ((fde_p^.media <> gfc$fm_served_file) OR
              (aste_p^.queue_id = mmc$pq_job_working_set)) THEN
          IF (aste_p^.pages_in_memory = 0) OR (aste_p^.queue_id = mmc$pq_job_working_set) THEN
            aste_p^.queue_id := mmc$pq_job_working_set;
            aste_p^.ijl_ordinal := cst_p^.ijl_ordinal;
          IFEND;
        ELSEIF aste_p^.queue_id > mmc$pq_shared_last THEN

{ The file belongs in the shared queue.  Make sure the AST entry is correct.

          aste_p^.ijl_ordinal := jmv$system_ijl_ordinal;
          aste_p^.queue_id := mmp$determine_shared_queue_id (fde_p, ste_p);
        IFEND; {not force_to_global}

      IFEND; {asti = 0}
      sva.asid := asid;
    IFEND; {asid <> 0}

*if $true(mmc$debug)
    IF ((aste_p^.ijl_ordinal <> cst_p^.ijl_ordinal) AND (aste_p^.ijl_ordinal <> jmv$system_ijl_ordinal)) THEN
      mtp$error_stop ('MM - Bad IJLO in CONVERT_PVA');
    IFEND;
*ifend

    xsva := sva;

  PROCEND mmp$convert_pva;
?? TITLE := 'MMP$VERIFY_PVA - Test job mode PVA to see if it is valid' ??
?? EJECT ??
{
{ Purpose:
{    This routine verifies a PVA relative to the CURRENT USER TASK
{    and returns an error code if it is not valid.
{

  PROCEDURE [XDCL] mmp$verify_pva
    (    p: ^cell;
         segment_access: mmt$segment_access_type;
     VAR status: syt$monitor_status);

    VAR
{     cst_p: ^ost$cpu_state_table,
      ring: 0 .. 15,
      pva_p: ^ost$pva,
      ste_p: ^mmt$segment_descriptor,
      stxe_p: ^mmt$segment_descriptor_extended,
      segnum: ost$segment,
      xcb_p: ^ost$execution_control_block;

    status.normal := TRUE;
{   mtp$cst_p (cst_p);

    pva_p := p;
    ring := pva_p^.ring;

    IF (ring = 0) OR (pva_p^.offset < 0) THEN
      mtp$set_status_abnormal ('MM', mme$invalid_pva, status);
      RETURN; {----->
    IFEND;

    xcb_p := mtf$cst_p()^.xcb_p;
    IF ring < xcb_p^.xp.p_register.pva.ring THEN
      ring := xcb_p^.xp.p_register.pva.ring;
    IFEND;

    segnum := pva_p^.seg;
    ste_p := mmp$get_sdt_entry_p (xcb_p, segnum);
    stxe_p := mmp$get_sdtx_entry_p (xcb_p, segnum);

    IF (segnum > xcb_p^.xp.segment_table_length) OR (ste_p^.ste.vl = osc$vl_invalid_entry) THEN
      mtp$set_status_abnormal ('MM', mme$invalid_pva, status);
    ELSEIF mtv$sys_core_init_complete THEN
      IF (segment_access = mmc$sat_read) AND ((ring > ste_p^.ste.r2) OR (ste_p^.ste.rp = osc$non_readable))
            THEN
        mtp$set_status_abnormal ('MM', mme$invalid_pva, status);
      ELSEIF (segment_access = mmc$sat_write) AND ((ring > ste_p^.ste.r1) OR
            (ste_p^.ste.wp = osc$non_writable)) THEN
        mtp$set_status_abnormal ('MM', mme$invalid_pva, status);
      ELSEIF (segment_access = mmc$sat_read_or_write) AND (ring > ste_p^.ste.r2) THEN
        mtp$set_status_abnormal ('MM', mme$invalid_pva, status);
      ELSEIF (stxe_p^.sfid.residence = gfc$tr_system_wait_recovery) THEN
        syv$refs_to_unrecovered_seg := syv$refs_to_unrecovered_seg + 1;
        mtp$set_status_abnormal ('MM', mme$ref_to_unrecovered_file, status);
      IFEND;
    IFEND;

  PROCEND mmp$verify_pva;

?? TITLE := 'MMP$XTASK_PVA_TO_SVA - Convert job mode PVA to SVA' ??
?? EJECT ??
*copyc mmh$xtask_pva_to_sva

  PROCEDURE [XDCL] mmp$xtask_pva_to_sva
    (    p: ^cell;
     VAR sva: ost$system_virtual_address;
     VAR status: syt$monitor_status);

    VAR
{     cst_p: ^ost$cpu_state_table,
      fde_p: gft$locked_file_desc_entry_p,
      ste_p: ^mmt$segment_descriptor,
      stxe_p: ^mmt$segment_descriptor_extended,
      aste_p: ^mmt$active_segment_table_entry;

    mmp$verify_pva (^p, mmc$sat_read_or_write, status);
    IF status.normal THEN
{     mtp$cst_p (cst_p);
      mmp$convert_pva (p, mtf$cst_p(), sva, fde_p, aste_p, ste_p, stxe_p);
    IFEND;

  PROCEND mmp$xtask_pva_to_sva;
?? TITLE := 'MMP$GET_AVAIL_PAGE_FRAME' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{Name:
{    mmp$get_avail_page_frame
{Purpose:
{   This routine is called to find a free or available page frame.
{   No assignment of the page frame is made by this procedure.
{Input:
{   none
{Output:
{   pfti - Page Frame Table index of an available page frame. A value
{         of zero indicates no page frame was available.
{Error Codes:
{  none
{--------------------------------------------------------------------------------------------------------
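
{ Illustrative caller pattern (a sketch only, not part of the module):
{
{   mmp$get_avail_page_frame (pfti);
{   IF pfti = 0 THEN
{     <no frame was free or available; wait for memory or retry later>
{   IFEND;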


  PROCEDURE [XDCL, INLINE] mmp$get_avail_page_frame
    (VAR pfti: mmt$page_frame_index);

    VAR
      job_found: boolean;

    pfti := mmv$gpql [mmc$pq_free].pqle.link.bkw;
    IF pfti = 0 THEN

      IF (mmv$gpql [mmc$pq_avail].pqle.count <= mmv$min_avail_pages) AND mmv$post_deadstart AND
            (mmv$gpql [mmc$pq_free].pqle.count + mmv$gpql [mmc$pq_avail].pqle.count <
            mmv$reassignable_page_frames.now) THEN
        jsp$free_swapped_jobs_memory (jmv$null_ijl_ordinal, {S2_QUEUE_ONLY} FALSE, job_found);
        pfti := mmv$gpql [mmc$pq_free].pqle.link.bkw;
      IFEND;
      IF pfti = 0 THEN
        pfti := mmv$gpql [mmc$pq_avail].pqle.link.bkw;
        IF pfti <> 0 THEN
          mmp$delete_pt_entry (pfti, TRUE);
        IFEND;
      IFEND;
    IFEND;

  PROCEND mmp$get_avail_page_frame;
?? TITLE := 'MMP$RELINK_PAGE_FRAME' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{  This procedure moves a page frame from its current position
{  in a page queue to the head of a new queue identified by
{  the caller.
{--------------------------------------------------------------------------------------------------------
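
{ Queue layout inferred from the link handling below (a sketch): the queue control block's link.fwd
{ names the head frame (most recently linked) and link.bkw the tail frame (the next to be reassigned);
{ a frame's link.fwd points toward the tail and link.bkw toward the head, and a zero link means the
{ frame is the last one on that side.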

  PROCEDURE [XDCL] mmp$relink_page_frame
    (    pfti: mmt$page_frame_index;
         new_queue_id: mmt$page_frame_queue_id);

    VAR
      ijle_p: ^jmt$initiated_job_list_entry,
      old_queue_id: mmt$page_frame_queue_id,
      pfte_p: ^mmt$page_frame_table_entry,
      qcb_p: ^mmt$page_queue_list_entry,
      queue_id: mmt$page_frame_queue_id,
      taskid: ost$global_task_id;

    pfte_p := ^mmv$pft_p^ [pfti];
    queue_id := new_queue_id;
    old_queue_id := pfte_p^.queue_id;

    IF old_queue_id < mmc$pq_job_base THEN
      IF old_queue_id <= mmc$pq_last_reassignable THEN
        mmv$reassignable_page_frames.now := mmv$reassignable_page_frames.now - 1;
      ELSEIF (old_queue_id = mmc$pq_avail_modified) AND NOT mmv$pt_p^ [pfte_p^.pti].m THEN
        mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon - 1;
      IFEND;
      qcb_p := ^mmv$gpql [old_queue_id].pqle;
    ELSE
      ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);
      qcb_p := ^ijle_p^.job_page_queue_list [old_queue_id];
    IFEND;
    IF pfte_p^.link.fwd = 0 THEN
      qcb_p^.link.bkw := pfte_p^.link.bkw;
    ELSE
      mmv$pft_p^ [pfte_p^.link.fwd].link.bkw := pfte_p^.link.bkw;
    IFEND;
    IF pfte_p^.link.bkw = 0 THEN
      qcb_p^.link.fwd := pfte_p^.link.fwd;
    ELSE
      mmv$pft_p^ [pfte_p^.link.bkw].link.fwd := pfte_p^.link.fwd;
    IFEND;
    qcb_p^.count := qcb_p^.count - 1;

    pfte_p^.link.bkw := 0;
    IF (queue_id <= mmc$pq_last_reassignable) AND (pfte_p^.active_io_count > 0) THEN
*if $true(mmc$debug)
      IF (queue_id <> mmc$pq_free) THEN
        mtp$error_stop ('MM - relink 234');
      IFEND;
*ifend
      mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon + 1;
      pfte_p^.link.fwd := 0;
    ELSE
      IF queue_id < mmc$pq_job_base THEN
        IF (queue_id <= mmc$pq_last_reassignable) THEN
*if $true(mmc$debug)
          IF ((queue_id = mmc$pq_avail) AND ((mmv$pt_p^ [pfte_p^.pti].m) OR (mmv$pt_p^ [pfte_p^.pti].v))) THEN
            mtp$error_stop ('MM - relink - trapped modified/valid page in avail');
          IFEND;
*ifend
          IF pfte_p^.flawed THEN
            IF (queue_id = mmc$pq_avail) THEN
              mmp$delete_pt_entry (pfti, TRUE);
            IFEND;
            queue_id := mmc$pq_flawed;
          ELSE
            mmv$reassignable_page_frames.now := mmv$reassignable_page_frames.now + 1;

{ Check if scheduler is waiting for memory and ready scheduler if necessary.

            jmp$check_scheduler_memory_wait;
          IFEND;

          IF (queue_id = mmc$pq_free) THEN
            mmv$pages_to_dump_p^ [pfti] := FALSE;
          IFEND;

        ELSEIF (queue_id = mmc$pq_avail_modified) AND NOT mmv$pt_p^ [pfte_p^.pti].m THEN
*if $true(mmc$debug)
          IF pfte_p^.active_io_count = 0 THEN
            mtp$error_stop ('MM - no IO');
          IFEND;
*ifend
          mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon + 1;
        IFEND;
        qcb_p := ^mmv$gpql [queue_id].pqle;
      ELSE
        ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);
        qcb_p := ^ijle_p^.job_page_queue_list [queue_id];
      IFEND;
      IF qcb_p^.link.fwd = 0 THEN
        qcb_p^.link.bkw := pfti;
      ELSE
        mmv$pft_p^ [qcb_p^.link.fwd].link.bkw := pfti;
      IFEND;
      pfte_p^.link.fwd := qcb_p^.link.fwd;
      qcb_p^.link.fwd := pfti;
      qcb_p^.count := qcb_p^.count + 1;
    IFEND;

    pfte_p^.queue_id := queue_id;
    IF (queue_id = mmc$pq_free) OR (queue_id = mmc$pq_flawed) THEN
      pfte_p^.sva.asid := 0; { DON'T clear offset - required by mmp$change_asid}

{! delete until DM deletes active IO count - this code causes a timing problem swapping
{! out a job that has recently deleted a segment that had pages being written to disk.
{ IF pfte_p^.active_io_count <> 0 THEN
{ jmp$get_ijle_p (pfte_p^.ijl_ordinal, ijle_p);
{ jmv$ajl_p^ [ijle_p^.ajl_ordinal].active_io_page_count := jmv$ajl_p^ [ijle_p^.ajl_ordinal].
{ active_io_page_count - pfte_p^.active_io_count;
{ IFEND;

    IFEND;

    IF (mmv$memory_wait_queue.head <> 0) THEN
      IF (queue_id <= mmc$pq_last_reassignable) AND (pfte_p^.active_io_count = 0) THEN
        tmp$dequeue_task (mmv$memory_wait_queue, taskid);
      IFEND;
    IFEND;

    mmp$check_queues;

  PROCEND mmp$relink_page_frame;
?? SKIP := 2 ??
*copyc mmp$link_page_frame_to_queue
?? TITLE := 'MMP$MARK_PAGE_FLAWED' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{  This procedure marks a page as flawed.  If the page is in the available
{  queue or the free queue, move it to the flawed queue so that it will no
{  longer be available for use.  If the page is in use, it will be removed
{  when it is released.  Flawed pages occur only on the CY2000.  The Service
{  Processor notifies the OS via an NRSB entry.
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$mark_page_flawed
    (    pfti: mmt$page_frame_index);

    VAR
      pfte_p: ^mmt$page_frame_table_entry;

    IF NOT mmv$tables_initialized THEN
      RETURN; {----->
    IFEND;

    IF (pfti < LOWERBOUND (mmv$pft_p^)) OR (pfti > UPPERBOUND (mmv$pft_p^)) THEN
      RETURN; {----->
    IFEND;

    pfte_p := ^mmv$pft_p^ [pfti];
    pfte_p^.flawed := TRUE;

    IF (pfte_p^.queue_id <= mmc$pq_last_reassignable) THEN
      IF (pfte_p^.queue_id = mmc$pq_avail) THEN
        mmp$delete_pt_entry (pfti, TRUE);
      IFEND;
      mmp$relink_page_frame (pfti, mmc$pq_flawed);
    IFEND;

  PROCEND mmp$mark_page_flawed;
?? TITLE := 'MMP$CLAIM_PAGES_FOR_SWAPIN', EJECT ??

{---------------------------------------------------------------------------------------------------
{  This procedure is used by the job swapper to claim a large number of pages on swapin.
{---------------------------------------------------------------------------------------------------
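
{ Illustrative sizing (assumed counts): a job swapping in with queue counts of 40 (working set) plus
{ 10 (job fixed), of which job_fixed_contiguous_pages = 4 were never released at swapout, needs
{ 40 + 10 - 4 = 46 frames; the WHILE loop below frees swapped jobs until the free and available
{ queues can supply that many without dropping under mmv$min_avail_pages.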


  PROCEDURE [XDCL] mmp$claim_pages_for_swapin
    (    swapped_job_entry: jmt$swapped_job_entry;
         aste_p: ^mmt$active_segment_table_entry;
         ijl_ordinal: jmt$ijl_ordinal;
     VAR job_page_queue_list: mmt$job_page_queue_list;
     VAR status: syt$monitor_status);

    VAR
      count: integer,
      first_pfti: mmt$page_frame_index,
      ijl_p: ^jmt$initiated_job_list_entry,
      job_found: boolean,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      queue_count: integer,
      queue_id: mmt$page_frame_queue_id,
      source_queue_id: mmt$page_frame_queue_id;

    status.normal := TRUE;

    count := 0;
    ijl_p := jmf$ijle_p (ijl_ordinal);
    FOR queue_id := LOWERVALUE (mmt$job_page_queue_index) TO UPPERVALUE (mmt$job_page_queue_index) DO
      count := swapped_job_entry.job_page_queue_count [queue_id] + count;
      IF (queue_id = mmc$pq_job_fixed) AND NOT (jmc$dsw_job_recovery IN ijl_p^.delayed_swapin_work) THEN
        count := count - ijl_p^.job_fixed_contiguous_pages;
      IFEND;
    FOREND;

    WHILE (mmv$gpql [mmc$pq_free].pqle.count < count) AND mmv$post_deadstart AND
          (mmv$gpql [mmc$pq_free].pqle.count + mmv$gpql [mmc$pq_avail].pqle.count - count <=
          mmv$min_avail_pages) AND (mmv$gpql [mmc$pq_free].pqle.count + mmv$gpql [mmc$pq_avail].pqle.count <
          mmv$reassignable_page_frames.now) DO
      jsp$free_swapped_jobs_memory (jmv$null_ijl_ordinal, {S2_QUEUE_ONLY} TRUE, job_found);
      IF NOT job_found THEN
        mtp$set_status_abnormal ('MM', mme$no_free_pages, status);
        RETURN; {----->
      IFEND;
    WHILEND;

    source_queue_id := mmc$pq_free;

  /claim_pages/
    FOR queue_id := LOWERVALUE (mmt$job_page_queue_index) TO UPPERVALUE (mmt$job_page_queue_index) DO
      count := swapped_job_entry.job_page_queue_count [queue_id];
      IF count = 0 THEN
        CYCLE /claim_pages/; {----->
      IFEND;
      job_page_queue_list [queue_id].count := count;
      IF (queue_id = mmc$pq_job_fixed) AND NOT (jmc$dsw_job_recovery IN ijl_p^.delayed_swapin_work) THEN
        count := count - ijl_p^.job_fixed_contiguous_pages;
      IFEND;
      mmv$reassignable_page_frames.now := mmv$reassignable_page_frames.now - count;
      REPEAT
        queue_count := count;
        pfti := mmv$gpql [source_queue_id].pqle.link.bkw;
        IF pfti = 0 THEN
          IF source_queue_id = mmc$pq_avail THEN
            mtp$error_stop ('MM - no memory for claim pages');
          IFEND;
          source_queue_id := mmc$pq_avail;
          pfti := mmv$gpql [mmc$pq_avail].pqle.link.bkw;
        IFEND;
        first_pfti := pfti;
        WHILE (count > 0) AND (pfti <> 0) DO
          pfte_p := ^mmv$pft_p^ [pfti];
          IF source_queue_id = mmc$pq_avail THEN
            mmp$delete_pt_entry (pfti, TRUE);
            pfte_p^.sva := null_sva;
          IFEND;
          pfte_p^.ijl_ordinal := ijl_ordinal;
          pfte_p^.aste_p := aste_p;
          pfte_p^.queue_id := queue_id;
          count := count - 1;
          pfti := pfte_p^.link.bkw;
        WHILEND;

        mmv$gpql [source_queue_id].pqle.count := mmv$gpql [source_queue_id].pqle.count - queue_count + count;
        IF job_page_queue_list [queue_id].link.bkw = 0 THEN
          job_page_queue_list [queue_id].link.bkw := first_pfti;
        ELSE
          mmv$pft_p^ [job_page_queue_list [queue_id].link.fwd].link.bkw := first_pfti;
          mmv$pft_p^ [first_pfti].link.fwd := job_page_queue_list [queue_id].link.fwd;
        IFEND;
        IF pfti = 0 THEN
          job_page_queue_list [queue_id].link.fwd := mmv$gpql [source_queue_id].pqle.link.fwd;
          mmv$gpql [source_queue_id].pqle.link.bkw := 0;
          mmv$gpql [source_queue_id].pqle.link.fwd := 0;
        ELSE
          job_page_queue_list [queue_id].link.fwd := mmv$pft_p^ [pfti].link.fwd;
          mmv$pft_p^ [mmv$pft_p^ [pfti].link.fwd].link.bkw := 0;
          mmv$gpql [source_queue_id].pqle.link.bkw := pfti;
          mmv$pft_p^ [pfti].link.fwd := 0;
        IFEND;
      UNTIL count = 0;
    FOREND /claim_pages/;

    mmp$check_queues;

  PROCEND mmp$claim_pages_for_swapin;

?? TITLE := 'MMP$FREE_MEMORY_IN_JOB_QUEUES' ??
?? EJECT ??

{---------------------------------------------------------------------------------------------------
{
{ The purpose of this procedure is to efficiently relink ALL the pages in a job's page queue list
{ to the free queue.
{
{---------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$free_memory_in_job_queues
    (VAR job_page_queue_list: mmt$job_page_queue_list;
         increment_now: boolean;
         decrement_soon: boolean;
         job_termination: boolean);

    VAR
      asid: ost$asid,
      aste_p: ^mmt$active_segment_table_entry,
      count: integer,
      found: boolean,
      ijl_p: ^jmt$initiated_job_list_entry,
      index: integer,
      ipti: integer,
      last_contiguous_pfti: mmt$page_frame_index,
      hcount: 1 .. 32,
      next_pfti: mmt$page_frame_index,
      original_bkw_link: mmt$page_frame_index,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      queue_id: mmt$job_page_queue_index,
      taskid: ost$global_task_id,
      total_pages_freed: integer;

    total_pages_freed := 0;

    FOR queue_id := UPPERVALUE (mmt$job_page_queue_index) DOWNTO LOWERVALUE (mmt$job_page_queue_index) DO
      pfti := job_page_queue_list [queue_id].link.bkw;
      original_bkw_link := job_page_queue_list [queue_id].link.bkw;

{ If we are freeing the job-fixed queue, we must verify that there are not any
{ contiguous pages assigned. If there are contiguous pages assigned, it is
{ necessary to determine where in the queue the non-contiguous pages begin.
{ The contiguous pages are ALWAYS at the very beginning of the job-fixed
{ page queue. Contiguous pages are not freed.
{ NOTE: If pages have ASID = 0, then frames are being freed after an aborted swapin.

      IF pfti <> 0 THEN
        IF (queue_id = mmc$pq_job_fixed) THEN
          ijl_p := jmf$ijle_p (mmv$pft_p^ [pfti].ijl_ordinal);
          IF (ijl_p^.job_fixed_contiguous_pages <> 0) AND NOT job_termination AND
                NOT (jmc$dsw_job_recovery IN ijl_p^.delayed_swapin_work) THEN
            FOR index := 1 TO ijl_p^.job_fixed_contiguous_pages DO
              last_contiguous_pfti := pfti;
              #HASH_SVA (mmv$pft_p^ [pfti].sva, ipti, hcount, found);
              IF found THEN
                mmp$delete_pt_entry (pfti, TRUE);
              IFEND;
              pfti := mmv$pft_p^ [pfti].link.bkw;
            FOREND;
          IFEND;
        IFEND;

        WHILE pfti <> 0 DO
          pfte_p := ^mmv$pft_p^ [pfti];
          asid := pfte_p^.sva.asid;
          next_pfti := pfte_p^.link.bkw;

          IF asid <> 0 THEN
            IF pfte_p^.active_io_count = 0 THEN
              mmp$delete_pt_entry (pfti, TRUE);
              mmp$aste_pointer_from_pfti (pfti, aste_p);
              IF aste_p^.pages_in_memory = 0 THEN
                IF aste_p^.sfid.residence = gfc$tr_job THEN
                  mmp$free_asid (asid, aste_p);
                IFEND;
              IFEND;
              pfte_p^.sva := null_sva;
              IF pfte_p^.flawed THEN
                mmp$relink_page_frame (pfti, mmc$pq_flawed);
                IF decrement_soon THEN
                  mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon - 1;
                IFEND;
                IF NOT increment_now THEN
                  mmv$reassignable_page_frames.now := mmv$reassignable_page_frames.now - 1;
                IFEND;
              ELSE
                pfte_p^.queue_id := mmc$pq_free;
                mmv$pages_to_dump_p^ [pfti] := FALSE;
              IFEND;
            ELSE

{  IO is still active on a local file or a shared file.

              IF (pfte_p^.aste_p^.sfid.residence = gfc$tr_job) AND (job_termination) THEN
                mmv$pt_p^ [pfte_p^.pti].v := FALSE;
                mmp$delete_pt_entry (pfti, TRUE);
                mmp$relink_page_frame (pfti, mmc$pq_free);
                IF decrement_soon THEN
                  mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon - 1;
                IFEND;
              ELSE
                mmv$pt_p^ [pfte_p^.pti].v := FALSE;
                mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
                IF decrement_soon THEN
                  mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon - 1;
                IFEND;

{ If NOW was already incremented, OC to S2, then decrement NOW since IO is active.
{ NOW will be incremented when IO completes.  This applies to local files only.
{ NOW count is updated for shared files when movement from JWS to shared queues takes place.

                IF (NOT increment_now) AND (pfte_p^.aste_p^.sfid.residence = gfc$tr_job) THEN
                  mmv$reassignable_page_frames.now := mmv$reassignable_page_frames.now - 1;
                IFEND;
              IFEND;
            IFEND;
          ELSE

            IF pfte_p^.flawed THEN
              mmp$relink_page_frame (pfti, mmc$pq_flawed);
              IF decrement_soon THEN
                mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon - 1;
              IFEND;
              IF NOT increment_now THEN
                mmv$reassignable_page_frames.now := mmv$reassignable_page_frames.now - 1;
              IFEND;
            ELSE
              pfte_p^.queue_id := mmc$pq_free;
              mmv$pages_to_dump_p^ [pfti] := FALSE;
            IFEND;
          IFEND;
          pfti := next_pfti;
        WHILEND;

{ The contiguous pages assigned to the job-fixed segment must not be counted
{ when freeing pages in the queues.

        IF (queue_id = mmc$pq_job_fixed) AND (ijl_p^.job_fixed_contiguous_pages <> 0) AND
              NOT (jmc$dsw_job_recovery IN ijl_p^.delayed_swapin_work) AND NOT job_termination THEN
          count := job_page_queue_list [queue_id].count - ijl_p^.job_fixed_contiguous_pages;
          job_page_queue_list [queue_id].link.bkw := mmv$pft_p^ [last_contiguous_pfti].link.bkw;
        ELSE
          count := job_page_queue_list [queue_id].count;
        IFEND;
        IF count > 0 THEN
          IF mmv$gpql [mmc$pq_free].pqle.link.bkw = 0 THEN
            mmv$gpql [mmc$pq_free].pqle.link.bkw := job_page_queue_list [queue_id].link.bkw;
          ELSE
            mmv$pft_p^ [mmv$gpql [mmc$pq_free].pqle.link.fwd].link.bkw := job_page_queue_list [queue_id].
                  link.bkw;
            mmv$pft_p^ [job_page_queue_list [queue_id].link.bkw].link.fwd := mmv$gpql [mmc$pq_free].
                  pqle.link.fwd;
          IFEND;
          mmv$gpql [mmc$pq_free].pqle.link.fwd := job_page_queue_list [queue_id].link.fwd;
          IF (queue_id = mmc$pq_job_fixed) AND (ijl_p^.job_fixed_contiguous_pages <> 0) AND
                NOT (jmc$dsw_job_recovery IN ijl_p^.delayed_swapin_work) AND NOT job_termination THEN
            job_page_queue_list [queue_id].link.fwd := last_contiguous_pfti;
            job_page_queue_list [queue_id].link.bkw := original_bkw_link;
            job_page_queue_list [queue_id].count := ijl_p^.job_fixed_contiguous_pages;
          ELSE
            job_page_queue_list [queue_id].link.fwd := 0;
            job_page_queue_list [queue_id].link.bkw := 0;
            job_page_queue_list [queue_id].count := 0;
          IFEND;
          mmv$gpql [mmc$pq_free].pqle.count := mmv$gpql [mmc$pq_free].pqle.count + count;
          IF increment_now THEN
            mmv$reassignable_page_frames.now := mmv$reassignable_page_frames.now + count;
          IFEND;
          IF decrement_soon THEN
            mmv$reassignable_page_frames.soon := mmv$reassignable_page_frames.soon - count;
          IFEND;
          total_pages_freed := total_pages_freed + count;
        IFEND;
      IFEND;
    FOREND;

    IF increment_now THEN

{ Check if scheduler is waiting for memory and ready scheduler if necessary; this check is
{ necessary anytime page_frames.now is incremented, but don't do it inside a loop.

      jmp$check_scheduler_memory_wait;

    IFEND;

    WHILE (mmv$memory_wait_queue.head <> 0) AND (total_pages_freed > 0) DO
      total_pages_freed := total_pages_freed - 1;
      tmp$dequeue_task (mmv$memory_wait_queue, taskid);
    WHILEND;

    mmp$check_queues;

  PROCEND mmp$free_memory_in_job_queues;
?? TITLE := 'PF_PROC_TABLES_NOT_INITIALIZED' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{Name:
{  pf_proc_tables_not_initialized
{Purpose:
{  This routine is called to process page faults which occur before
{  the PQL, PFT, and AST have been initialized. The routine assigns
{  an available page frame and makes the page table entry for the
{  page.
{--------------------------------------------------------------------------------------------------------


  PROCEDURE pf_proc_tables_not_initialized
    (    xcb_p: ^ost$execution_control_block);

    VAR
      sva: ost$system_virtual_address,
      ste_p: ^mmt$segment_descriptor,
      static_next_rma: [STATIC] integer := 7fffffff(16),
      static_stop_rma: [STATIC] integer := 7fffffff(16),
      next_rma: integer,
      stop_rma: integer,
      pte: ost$page_table_entry,
      count: 1 .. 32,
      found: boolean,
      full_scan_has_been_done: boolean,
      pt_length: integer,
      pt_p: ^ost$page_table,
      pti: integer;


    pt_p := mmv$pt_p;
    pt_length := mmv$pt_length;
    ste_p := mmp$get_sdt_entry_p (xcb_p, xcb_p^.xp.untranslatable_pointer.seg);
    sva.asid := ste_p^.ste.asid;
    sva.offset := xcb_p^.xp.untranslatable_pointer.offset;
    sva.offset := (sva.offset DIV osv$page_size) * osv$page_size;


{The following loop is somewhat obscure but is structured for best performance during deadstart.
{The loop sets <NEXT_RMA> .. <STOP_RMA> to point to the next block of free pages that can be assigned.
{The block is determined by scanning the page table. The FIRST time thru the loop, it locates the
{large block at the end of memory. (Deadstart loads most of the OS at the beginning of memory.)
{Subsequent passes thru the loop locate the next block following the block that was just assigned.
{   (The loop has been optimized by looking at the object code generated and adjusting
{    the source to get good object code).
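{   Illustrative units (assumed page size): RMAs are in 512-byte units, so with osv$page_size = 4096
{   one page spans osv$page_size DIV 512 = 8 RMA units; advancing past an assigned page therefore adds
{   8 to next_rma, and the scan bounds come from osv$180_memory_limits divided by 512.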

?? PUSH (CHKALL := OFF) ??
    IF static_next_rma = static_stop_rma THEN

{  Locate the starting point of the next block.

      next_rma := static_next_rma;
      stop_rma := static_stop_rma;
      IF next_rma = 7fffffff(16) THEN
        next_rma := osv$180_memory_limits.lower DIV 512;
        stop_rma := (osv$180_memory_limits.deadstart_upper DIV 512);
        pti := pt_length;
        REPEAT
          pti := pti - 1;
          pte := pt_p^ [pti];
          IF pte.v AND (pte.rma > next_rma) AND (pte.rma < stop_rma) THEN
            next_rma := pte.rma;
          IFEND;
        UNTIL pti = 0;
        next_rma := next_rma + osv$page_size DIV 512;
      IFEND;
      IF next_rma = stop_rma THEN
        full_scan_has_been_done := FALSE;
        REPEAT
          next_rma := next_rma + (osv$page_size DIV 512);
          IF next_rma >= (osv$180_memory_limits.deadstart_upper DIV 512) THEN
            IF full_scan_has_been_done THEN
              mtp$error_stop ('MM - not enough mem to deadstart');
            IFEND;
            next_rma := osv$180_memory_limits.lower DIV 512;
            full_scan_has_been_done := TRUE;
          IFEND;
          pti := pt_length - 1;
          pte := pt_p^ [pti];
          WHILE (pti > 0) AND (NOT pte.v OR (pte.rma <> next_rma)) DO
            pti := pti - 1;
            pte := pt_p^ [pti];
          WHILEND;
        UNTIL NOT pte.v OR (pte.rma <> next_rma);
      IFEND;

{  Locate the end of the block just selected.

      stop_rma := (osv$180_memory_limits.deadstart_upper DIV 512);
      pti := pt_length;
      REPEAT
        pti := pti - 1;
        pte := pt_p^ [pti];
        IF pte.v AND (pte.rma > next_rma) AND (pte.rma < stop_rma) THEN
          stop_rma := pte.rma;
        IFEND;
      UNTIL pti = 0;
      static_next_rma := next_rma;
      static_stop_rma := stop_rma;
    IFEND;

?? POP ??

    #HASH_SVA (sva, pti, count, found);
    IF found THEN
      mtp$error_stop ('MM - PTE exists');
    IFEND;
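{ #HASH_SVA (as used here) returns the last slot probed and the probe count; back up to the start of
{ the hash group (wrapping at the table origin), then scan forward with wraparound for a free entry.
{ A hash group holds at most 32 entries, so reaching count = 33 means no free slot exists.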
    pti := pti - count + 1;
    IF pti < 0 THEN
      pti := pti + mmv$pt_length;
    IFEND;
    count := 1;
    WHILE (mmv$pt_p^ [pti].pageid.asid <> 0) AND (count < 33) DO
      count := count + 1;
      pti := pti + 1;
      IF pti = mmv$pt_length THEN
        pti := 0;
      IFEND;
    WHILEND;

    IF count = 33 THEN
      mtp$error_stop ('MM - PT full in deadstart');
    IFEND;

    pte.v := FALSE;
    pte.c := TRUE;
    pte.u := TRUE;
    pte.m := FALSE;
    pte.pageid.asid := sva.asid;
    pte.pageid.pagenum := sva.offset DIV 512;
    pte.rma := static_next_rma;
    static_next_rma := static_next_rma + (osv$page_size DIV 512);
    mmv$pt_p^ [pti] := pte;
    mmp$preset_real_memory (sva, pmc$initialize_to_zero);
    mmv$pt_p^ [pti].v := TRUE;

  PROCEND pf_proc_tables_not_initialized;
?? TITLE := 'MMP$SEND_ESCAPED_ALLOC_FLAG', EJECT ??

{ Purpose:
{   This procedure is called when a WRITE_PAGE_TO_DISK request discovers escaped
{   allocation. This procedure sends a flag to a task to assign the backing storage.
{


  PROCEDURE mmp$send_escaped_alloc_flag
    (    fde_p: gft$locked_file_desc_entry_p;
         pfte_p: ^mmt$page_frame_table_entry);

    VAR
      ijl_ordinal: jmt$ijl_ordinal,
      ijle_p: ^jmt$initiated_job_list_entry,
      old_assign_active: integer,
      pfte_ijle_p: ^jmt$initiated_job_list_entry,
      sfid: gft$system_file_identifier,
      status: syt$monitor_status,
      ste_p: ^mmt$segment_descriptor,
      stxe_p: ^mmt$segment_descriptor_extended,
      xcb_p: ^ost$execution_control_block;


{ Try to get a pointer to the XCB of the last task using the segment. The GTID will
{ be invalid if the task has terminated.  NOTE:  If get_xcb_p is successful (a non-NIL
{ xcb_p is returned), the ajl must be unlocked when processing is done.

    xcb_p := NIL;
    tmp$test_get_xcb_p (fde_p^.global_task_id, xcb_p, ijle_p);
    pfte_ijle_p := jmf$ijle_p (pfte_p^.ijl_ordinal);
    gfp$mtr_get_sfid_from_fde_p (fde_p, sfid, ijl_ordinal);


{ If the GTID is still valid, notify the task to assign space. If the segment is still valid AND the
{ same SFID, use the SDTX.ASSIGN_ACTIVE mechanism to notify the task to allocate space. If the segment
{ is NOT the same SFID, use field in the XCB to pass the SFID.

    IF (xcb_p <> NIL) AND (ijle_p = pfte_ijle_p) THEN
      ste_p := mmp$get_sdt_entry_p (xcb_p, fde_p^.last_segment_number);
      stxe_p := mmp$get_sdtx_entry_p (xcb_p, fde_p^.last_segment_number);
      IF (ste_p^.ste.vl <> osc$vl_invalid_entry) AND (sfid = stxe_p^.sfid) THEN
        old_assign_active := stxe_p^.assign_active;
        set_assign_active (stxe_p, pfte_p^.sva.offset);
        IF old_assign_active = mmc$assign_active_null THEN
          tmp$set_monitor_flag (fde_p^.global_task_id, mmc$mf_segment_mgr_flag, status);
        IFEND;
      ELSEIF xcb_p^.assign_active_sfid = gfv$null_sfid THEN
        xcb_p^.assign_active_sfid := sfid;
        tmp$set_monitor_flag (fde_p^.global_task_id, mmc$mf_segment_mgr_flag, status);
      IFEND;
      jmp$unlock_ajl (ijle_p);


{ If the GTID is no longer valid, let the job monitor of the job take care of allocation.

    ELSE

{ Unlock the ajl set by tmp$test_get_xcb_p if necessary.  We may be in this section of
{ code because the ijl pointers do not match.

      IF xcb_p <> NIL THEN
        jmp$unlock_ajl (ijle_p);
      IFEND;

      tmp$get_xcb_p (pfte_ijle_p^.job_monitor_taskid, xcb_p, ijle_p);
      IF xcb_p <> NIL THEN
        IF xcb_p^.assign_active_sfid = gfv$null_sfid THEN
          xcb_p^.assign_active_sfid := sfid;
          tmp$set_monitor_flag (ijle_p^.job_monitor_taskid, mmc$mf_segment_mgr_flag, status);
        IFEND;
        mmv$jmtr_escaped_allocate := mmv$jmtr_escaped_allocate + 1;
        jmp$unlock_ajl (ijle_p);
      ELSE
        mtp$error_stop ('MM - lost segment owner'); {!! can we get here??
        mmv$lost_escaped_allocate := mmv$lost_escaped_allocate + 1;
      IFEND;
    IFEND;

  PROCEND mmp$send_escaped_alloc_flag;

?? TITLE := 'MMP$WRITE_PAGE_TO_DISK' ??
?? EJECT ??

{-------------------------------------------------------------------------
{ This procedure is used to write a page to disk. All pages in the transfer unit will be
{ written unless they are locked.
{
{----------------------------------------------------------------------------


*copyc mmt$write_page_to_disk_status

  PROCEDURE [XDCL] mmp$write_page_to_disk
    (    fde_p: gft$locked_file_desc_entry_p;
         pfti: mmt$page_frame_index;
         iotype: iot$io_function;
         io_id: mmt$io_identifier;
         multiple_page_req: boolean;
     VAR write_status: mmt$write_page_to_disk_status);

    VAR
      aste_p: ^mmt$active_segment_table_entry,
      boffset: integer,
      buffer_descriptor: mmt$buffer_descriptor,
      count: 1 .. 32,
      eoffset: integer,
      found: boolean,
      ijle_p: ^jmt$initiated_job_list_entry,
      ijl_ordinal: jmt$ijl_ordinal,
      length: integer,
      lsva: ost$system_virtual_address,
      max_bytes_to_write: integer,
      offset: integer, {don't make this a subrange}
      pfte_p: ^mmt$page_frame_table_entry,
      pte_p: ^ost$page_table_entry,
      pti: integer,
      served_file: boolean,
      status: syt$monitor_status,
      stxe_p: ^mmt$segment_descriptor_extended,
      sva: ost$system_virtual_address,
      tu_start: integer,
      tu_end: integer,
      write_multiple_pages: boolean,
      xcb_p: ^ost$execution_control_block,
      xpfti: mmt$page_frame_index;



{ If the segment is not assigned to a file, reject the request and send a signal to the
{ owner of the segment to assign a backing file.

    pfte_p := ^mmv$pft_p^ [pfti];
    IF fde_p^.media = gfc$fm_transient_segment THEN
      mmp$send_escaped_alloc_flag (fde_p, pfte_p);
      write_status := ws_no_file_assigned;
      mmv$write_page_statistics [ws_no_file_assigned] := mmv$write_page_statistics [ws_no_file_assigned] + 1;
      RETURN; {----->
    IFEND;


{ Reject the write if the page belongs to a file that has not yet been recovered.
{ The write must be delayed for a while.

    aste_p := pfte_p^.aste_p;
    IF aste_p^.sfid.residence = gfc$tr_system_wait_recovery THEN
      write_status := ws_physical_io_reject;
      mmv$write_page_statistics [ws_physical_io_reject] :=
            mmv$write_page_statistics [ws_physical_io_reject] + 1;
      RETURN; {----->
    IFEND;


{ Determine the maximum number of bytes that can be written. For mass storage files, it is
{ an allocation unit. For served files, the size is the smaller of the file's allocation unit size
{ and a constant that depends on the buffer size in STORENET.
{ Note also that served files do not allow multiple outstanding write requests on a
{ page because writes can be processed out of order.
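{ Illustrative sizes (assumed): an allocation unit of 16384 bytes with a 4096-byte page yields a
{ transfer unit of at most 4 pages per write for a mass storage file; a served file may be held
{ to less by the STORENET buffer constant noted above.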

    max_bytes_to_write := fde_p^.allocation_unit_size;

    served_file := (fde_p^.media = gfc$fm_served_file);
    IF served_file THEN
      IF mmv$pft_p^ [pfti].active_io_count <> 0 THEN
        write_status := ws_physical_io_reject;
        mmv$write_page_statistics [ws_physical_io_reject] :=
              mmv$write_page_statistics [ws_physical_io_reject] + 1;
        RETURN; {----->
      IFEND;
{!??} mmv$pft_p^ [pfti].io_error := ioc$no_error;
    IFEND;


{Calculate the SVA and LENGTH of the data to write to disk. The algorithm is to
{start with the page specified by <pfti> and search contiguous pages
{in both directions in the segment until 1) the ends of the transfer unit are passed, 2) a locked page is
{found (PFT.LOCKED_PAGE), 3) a page not in memory is found, 4) a non-modified page is found, OR
{5) a page is found that already has active IO (server only).
{The amount of data to write to disk is bounded by the outermost modified pages found by the search.

{Pages in the available modified queue will always be written.  Multiple pages not in the available
{modified queue will not be written if the page belongs to a swapped job.
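{ Illustrative search (assumed sizes): with max_bytes_to_write = 16384 and the faulting page at
{ sva.offset = 20480, the transfer unit is tu_start = 16384 .. tu_end = 32768; if every neighboring
{ page is present and modified, the backward scan pulls the start down to 16384 and the forward scan
{ pushes the end to 32768, so one request writes 16384 bytes (4 pages).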

    sva := pfte_p^.sva;
    tu_start := (sva.offset DIV max_bytes_to_write) * max_bytes_to_write;
    tu_end := tu_start + max_bytes_to_write;
    IF (tu_end > osc$max_segment_length) THEN
      tu_end := osc$max_segment_length;
    IFEND;
    ijle_p := jmf$ijle_p (aste_p^.ijl_ordinal);
    write_multiple_pages := multiple_page_req AND (ijle_p^.swap_status = jmc$iss_executing);
    lsva := sva;
    offset := sva.offset;
    boffset := offset;

  /find_starting_page/
    WHILE boffset > tu_start DO
      boffset := boffset - osv$page_size;
      lsva.offset := boffset;
      #HASH_SVA (lsva, pti, count, found);
      IF NOT found OR NOT mmv$pt_p^ [pti].m THEN
        EXIT /find_starting_page/; {----->
      IFEND;
      xpfti := (mmv$pt_p^ [pti].rma * 512) DIV osv$page_size;
      IF (mmv$pft_p^ [xpfti].locked_page <> mmc$lp_not_locked) OR
            ((mmv$pft_p^ [xpfti].queue_id <> mmc$pq_avail_modified) AND
            (NOT write_multiple_pages)) OR (served_file AND (mmv$pft_p^ [xpfti].active_io_count <> 0)) THEN
        EXIT /find_starting_page/; {----->
      IFEND;
      offset := boffset;
    WHILEND /find_starting_page/;

    eoffset := sva.offset + osv$page_size;
    sva.offset := offset;

  /find_ending_page/
    WHILE (eoffset < tu_end) DO
      lsva.offset := eoffset;
      #HASH_SVA (lsva, pti, count, found);
      IF NOT found OR NOT mmv$pt_p^ [pti].m THEN
        EXIT /find_ending_page/; {----->
      IFEND;
      xpfti := (mmv$pt_p^ [pti].rma * 512) DIV osv$page_size;
      IF (mmv$pft_p^ [xpfti].locked_page <> mmc$lp_not_locked) OR
            ((mmv$pft_p^ [xpfti].queue_id <> mmc$pq_avail_modified) AND
            (NOT write_multiple_pages)) OR (served_file AND (mmv$pft_p^ [xpfti].active_io_count <> 0)) THEN
        EXIT /find_ending_page/; {----->
      IFEND;
      eoffset := eoffset + osv$page_size;
    WHILEND /find_ending_page/;
    length := eoffset - sva.offset;


{Issue the write request to device manager. NOTE that the process of locking the page frames
{will clear the 'modified' bit in the page table.

    buffer_descriptor.buffer_descriptor_type := mmc$bd_paging_io;
    buffer_descriptor.sva := sva;
    buffer_descriptor.page_count := length DIV osv$page_size;

{ Issue the i/o.  Cases include pages on and not on the server.
{ Note: EOI update must be done first since it is used by the file server.

    mmp$update_eoi (fde_p, eoffset - osv$page_size, mmc$uer_page_written);
    IF NOT served_file THEN
      iop$pager_io (fde_p, sva.offset, buffer_descriptor, length, iotype, io_id, status);
    ELSE
      dfp$server_io (fde_p, iotype, sva.offset, length, io_id, buffer_descriptor, status);
    IFEND;

    IF status.normal THEN
      fde_p^.time_last_modified := #FREE_RUNNING_CLOCK (0);
      write_status := ws_ok;
      mmv$write_page_statistics [ws_ok] := mmv$write_page_statistics [ws_ok] + 1;
    ELSEIF status.condition = dme$transient_error THEN
      write_status := ws_device_manager_reject;
      mmv$write_page_statistics [ws_device_manager_reject] :=
            mmv$write_page_statistics [ws_device_manager_reject] + 1;
    ELSEIF status.condition = ioe$requests_full THEN
      write_status := ws_physical_io_reject;
      mmv$write_page_statistics [ws_physical_io_reject] :=
            mmv$write_page_statistics [ws_physical_io_reject] + 1;
    ELSEIF (status.condition = ioe$unit_disabled) OR (status.condition = dme$volume_unavailable) THEN
      write_status := ws_volume_unavailable;
      mmv$write_page_statistics [ws_volume_unavailable] :=
            mmv$write_page_statistics [ws_volume_unavailable] + 1;
    ELSEIF status.condition = dfe$server_has_terminated THEN
      write_status := ws_server_terminated;
      mmv$write_page_statistics [ws_server_terminated] := mmv$write_page_statistics [ws_server_terminated] +
            1;
    ELSEIF status.condition = dme$job_mode_allocate_required THEN
      mmp$send_escaped_alloc_flag (fde_p, pfte_p);
      write_status := ws_device_manager_reject;
      mmv$write_page_statistics [ws_device_manager_reject] :=
            mmv$write_page_statistics [ws_device_manager_reject] + 1;
    ELSE
      mtp$error_stop ('MM - unexpected phy io error');
    IFEND;

    IF length <> osv$page_size THEN {!This stat should be moved to IF STATUS.NORMAL
      mmv$aging_statistics.multiple_pages_written_to_disk :=
            mmv$aging_statistics.multiple_pages_written_to_disk + 1;
    ELSE
      mmv$aging_statistics.page_written_to_disk := mmv$aging_statistics.page_written_to_disk + 1;
    IFEND;

  PROCEND mmp$write_page_to_disk;

?? TITLE := 'MMP$REMOVE_PAGES_FROM_JWS', EJECT ??

{--------------------------------------------------------------------------------------------------------
{Name:
{  mmp$remove_pages_from_JWS
{Purpose:
{  This procedure is called to remove a page from the working set of a job.
{Notes:
{  - this routine will take care of page map purges if the page goes to the AVAIL_MODIFIED queue.
{    No purging is done if the page goes to the JWS queue. This queue is used for swapping only and no
{    purging is necessary.
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$remove_pages_from_jws
    (    modified_queue_id: mmt$page_frame_queue_id;
         ijle_p: ^jmt$initiated_job_list_entry;
         reject_avail_mod_q_max_reached: boolean;
     VAR xfcount: integer;
     VAR xmcount: integer;
     VAR xrcount: integer);

    VAR
      fde_p: gft$locked_file_desc_entry_p,
      tos: integer,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      aste_p: ^mmt$active_segment_table_entry,
      write_status: mmt$write_page_to_disk_status,
      io_id: mmt$io_identifier,
      mcount: integer,
      rcount: integer,
      fcount: integer,
      pte_p: ^ost$page_table_entry;


    mcount := 0;
    rcount := 0;
    fcount := 0;
    io_id.specified := FALSE;

{ Scan the PFTI array and eliminate any entries that cannot be removed. Clear the
{ 'valid' and 'used' bits for the entries that may be removed. Note: this step is unnecessary if
{ the pages are going to the JWS queue - this is done ONLY for job swapout. The map purge is not
{ required until the job starts running again. The job swapper ensures that the purge occurs.


    IF modified_queue_id <> mmc$pq_job_working_set THEN
      mmp$reset_find_next_pfti (pfti);
      WHILE pfti <> 0 DO
        mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := FALSE;
        mmv$pt_p^ [mmv$pft_p^ [pfti].pti].u := FALSE;
        mmp$find_next_pfti (pfti);
      WHILEND;


{ Now that all used and valid bits have been cleared, purge the page map. It is important on a dual
{ CPU system to purge the page maps before deleting the page table entry. Also, it is not
{ possible to reliably determine the state of the 'modified' bit without clearing the 'valid'
{ bit and purging the page map.
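{ Example of the race being avoided (dual CPU, a sketch): while another CPU still holds a valid
{ map entry for a page it can set the 'modified' bit at any instant; only after V is cleared and
{ that map is purged is the M bit stable enough to decide which queue the page belongs in.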

      mmp$purge_all_page_map;
    IFEND;

    mmp$reset_find_next_pfti (pfti);

    WHILE pfti <> 0 DO
      pfte_p := ^mmv$pft_p^ [pfti];
      aste_p := pfte_p^.aste_p;
      pfte_p^.age := 0;
      pfte_p^.cyclic_age := 0;
      pte_p := ^mmv$pt_p^ [pfte_p^.pti];
      IF modified_queue_id = mmc$pq_job_working_set THEN
        pte_p^.v := FALSE;
        pte_p^.u := FALSE;
      IFEND;


{ If the segment is locked and (potentially) modified, the pages cannot be removed.
{ Reset PTE.V because it was cleared above.

      IF aste_p^.sfid.residence = gfc$tr_system_wait_recovery THEN
        pfti := 0;
        pte_p^.v := TRUE;
      ELSE
        gfp$mtr_get_locked_fde_p (aste_p^.sfid, ijle_p, fde_p);
        IF fde_p^.segment_lock.locked_for_write AND (pte_p^.m OR (pfte_p^.active_io_count > 0)) THEN
          pfti := 0;
          pte_p^.v := TRUE;


{ If the page belongs to a device file that has the WIRE_EOI attribute, don't remove it if
{ it is the last page of the segment. Set the USED bit so it won't be aged out again
{ for a while. Reset PTE.V because it was cleared above.

        ELSEIF fde_p^.flags.wire_eoi_page THEN
          IF (fde_p^.eoi_byte_address - mmv$pft_p^ [pfti].sva.offset) <= osv$page_size THEN
            pte_p^.v := TRUE;
            pte_p^.u := TRUE;
            pfti := 0;
          IFEND;


{ If the page belongs to a stack segment and is no longer needed, delete the page
{ and relink the page frame to the free queue.

        ELSEIF fde_p^.stack_for_ring <> 0 THEN
          tmp$get_top_of_stack (fde_p^.global_task_id, fde_p^.stack_for_ring, tos);
          IF pfte_p^.sva.offset >= tos THEN
            mmp$delete_pt_entry (pfti, TRUE);
            mmp$relink_page_frame (pfti, mmc$pq_free);
            fde_p^.eoi_byte_address := tos;
            pfti := 0;
            rcount := rcount + 1;
          IFEND;
        IFEND;
      IFEND;

{ Remove the page from the JWS and put it in the new queue.  New queue is determined by the state of the 'UM'
{ bits in the page table entry. New queue may also be specified by caller - modified pages must
{ be put in JWS queue if job is being swapped out.

      IF pfti <> 0 THEN
        rcount := rcount + 1;
        IF NOT pte_p^.m THEN
          IF pfte_p^.active_io_count <> 0 THEN
            IF ok_to_relink_to_avail_modified () THEN
              mcount := mcount + 1; { The page is not really modified, but IO is active. So what?
              mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
              mmv$reject_availmod_relink_stat [1].relinked := mmv$reject_availmod_relink_stat [1].relinked +
                    1;
            ELSEIF NOT reject_avail_mod_q_max_reached THEN
{We do the same as when linking is okay, but count the call for debugging purposes.
              mcount := mcount + 1; { The page is not really modified, but IO is active. So what?
              mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
              fcount := fcount + 1;
              mmv$reject_availmod_relink_stat [1].qflooded := mmv$reject_availmod_relink_stat [1].qflooded +
                    1;
            ELSE
              rcount := rcount - 1; { This page cannot be linked, so we can't count it.
              pte_p^.v := TRUE;
              fcount := fcount + 1;
              mmv$reject_availmod_relink_stat [1].rejected := mmv$reject_availmod_relink_stat [1].rejected +
                    1;
            IFEND;
          ELSEIF mmv$no_memory_buffering THEN
            mmp$delete_pt_entry (pfti, TRUE);
            mmp$relink_page_frame (pfti, mmc$pq_free);
          ELSE
            mmp$relink_page_frame (pfti, mmc$pq_avail);
          IFEND;
        ELSE
          IF modified_queue_id = mmc$pq_avail_modified THEN
            IF ok_to_relink_to_avail_modified () THEN
              mmp$relink_page_frame (pfti, modified_queue_id);
              IF ((mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon) <
                    mmv$write_aged_out_pages) AND (modified_queue_id = mmc$pq_avail_modified) THEN
                io_id.specified := FALSE;
                mmp$write_page_to_disk (fde_p, pfti, ioc$write_page, io_id, mmv$multi_page_write,
                      write_status);
              IFEND;
              mcount := mcount + 1;
              mmv$reject_availmod_relink_stat [2].relinked := mmv$reject_availmod_relink_stat [2].relinked +
                    1;
            ELSEIF NOT reject_avail_mod_q_max_reached THEN
{We do the same as when linking is okay, but count the call for debugging purposes.
              mmp$relink_page_frame (pfti, modified_queue_id);
              IF ((mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon) <
                    mmv$write_aged_out_pages) AND (modified_queue_id = mmc$pq_avail_modified) THEN
                io_id.specified := FALSE;
                mmp$write_page_to_disk (fde_p, pfti, ioc$write_page, io_id, mmv$multi_page_write,
                      write_status);
              IFEND;
              mcount := mcount + 1;
              fcount := fcount + 1;
              mmv$reject_availmod_relink_stat [2].qflooded := mmv$reject_availmod_relink_stat [2].qflooded +
                    1;
            ELSE
              rcount := rcount - 1; { This page cannot be linked, so we can't count it.
              fcount := fcount + 1;
              pte_p^.v := TRUE;
              mmv$reject_availmod_relink_stat [2].rejected := mmv$reject_availmod_relink_stat [2].rejected +
                    1;
            IFEND;
          ELSE
            mmp$relink_page_frame (pfti, modified_queue_id);
            IF ((mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon) <
                  mmv$write_aged_out_pages) AND (modified_queue_id = mmc$pq_avail_modified) THEN
              io_id.specified := FALSE;
              mmp$write_page_to_disk (fde_p, pfti, ioc$write_page, io_id, mmv$multi_page_write, write_status);
            IFEND;
            mcount := mcount + 1;
          IFEND;
        IFEND;
      IFEND;

      mmp$find_next_pfti (pfti);
    WHILEND;

    mmv$aging_statistics.remove_unmodified_page_from_ws :=
          mmv$aging_statistics.remove_unmodified_page_from_ws + rcount - mcount;
    mmv$aging_statistics.remove_modified_page_from_ws := mmv$aging_statistics.remove_modified_page_from_ws +
          mcount;
    xfcount := fcount;
    xrcount := rcount;
    xmcount := mcount;

  PROCEND mmp$remove_pages_from_jws;
?? TITLE := 'MMP$REMOVE_PAGE_FROM_JWS', EJECT ??

{--------------------------------------------------------------------------------------------------------
{Name:
{  mmp$remove_page_from_JWS
{Purpose:
{  This procedure is called to remove a page from the working set of a job.
{Notes:
{  - this routine will take care of page map purges.
{  - this routine does not necessarily write the page to disk.
{--------------------------------------------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$remove_page_from_jws
    (    pfti: mmt$page_frame_index;
         ijle_p: ^jmt$initiated_job_list_entry;
         reject_avail_mod_q_max_reached: boolean;
     VAR mcount: integer;
     VAR rcount: integer;
     VAR status: mmt$relink_page_status);

    VAR
      fde_p: gft$locked_file_desc_entry_p,
      tos: integer,
      pfte_p: ^mmt$page_frame_table_entry,
      aste_p: ^mmt$active_segment_table_entry,
      write_status: mmt$write_page_to_disk_status,
      io_id: mmt$io_identifier,
      pte_p: ^ost$page_table_entry;

{Reject the request if the page is locked.
    mcount := 0;
    pfte_p := ^mmv$pft_p^ [pfti];
    aste_p := pfte_p^.aste_p;
    IF (pfte_p^.locked_page <> mmc$lp_not_locked) OR (aste_p^.sfid.residence = gfc$tr_system_wait_recovery)
          THEN
      rcount := 0;
      status := mmc$rps_page_locked;
      RETURN; {<----}
    IFEND;


{Clear the valid bit in the page table entry for the page.
{Valid bit MUST be cleared and map purged (in dual CPU) before examining modified bit.

    pte_p := ^mmv$pt_p^ [pfte_p^.pti];
    pte_p^.v := FALSE;
    pte_p^.u := FALSE;
    mmp$sva_purge_one_page_map (pfte_p^.sva);


{If page belongs to a locked segment and is modified, leave it alone. NOTE: valid bit must
{be set again because it was cleared in a previous step.

    gfp$mtr_get_locked_fde_p (aste_p^.sfid, ijle_p, fde_p);

    IF fde_p^.segment_lock.locked_for_write AND (pte_p^.m OR (pfte_p^.active_io_count > 0)) THEN
      pte_p^.v := TRUE;
      rcount := 0;
      status := mmc$rps_page_locked;
      RETURN; {<----}
    ELSEIF fde_p^.flags.wire_eoi_page THEN

{ If the page belongs to a device file that has the WIRE_EOI attribute, don't remove it if
{ it is the last page of the segment. Set the USED bit so we won't try to age it
{ for a while. Reset PTE.V because it was cleared above.

      IF (fde_p^.eoi_byte_address - pfte_p^.sva.offset) <= osv$page_size THEN
        pte_p^.v := TRUE;
        pte_p^.u := TRUE;
        rcount := 0;
        status := mmc$rps_page_reject;
        RETURN; {<----}
      IFEND;
    IFEND;

{Reset page ages.

    pfte_p^.age := 0;
    pfte_p^.cyclic_age := 0;
    rcount := 1;


{If the page belongs to a stack segment and is no longer needed, delete the page
{and relink the page frame to the free queue.

    IF fde_p^.stack_for_ring <> 0 THEN
      tmp$get_top_of_stack (fde_p^.global_task_id, fde_p^.stack_for_ring, tos);
      IF pfte_p^.sva.offset >= tos THEN
        mmp$delete_pt_entry (pfti, TRUE);
        mmp$relink_page_frame (pfti, mmc$pq_free);
        fde_p^.eoi_byte_address := tos;
        status := mmc$rps_page_relinked;
        RETURN; {<----}
      IFEND;
    IFEND;


{Remove the page from the JWS and put it in the new queue.  New queue is determined by the state of the 'UM'
{bits in the page table entry. New queue may also be specified by caller - modified pages must
{be put in JWS queue if job is being swapped out.
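{Illustrative summary of the dispositions made below:
{   m = 0, active_io_count = 0, no memory buffering  -> free queue (PT entry deleted)
{   m = 0, active_io_count = 0, memory buffering     -> avail queue
{   m = 0, active_io_count <> 0                      -> avail modified queue (treated as modified)
{   m = 1                                            -> avail modified queue, written behind when
{                                                       reassignable frames run low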

    status := mmc$rps_page_relinked;
    IF NOT pte_p^.m THEN
      IF pfte_p^.active_io_count <> 0 THEN
        IF ok_to_relink_to_avail_modified () THEN
          mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
          mmv$aging_statistics.remove_modified_page_from_ws :=
                mmv$aging_statistics.remove_modified_page_from_ws + 1;
          mcount := mcount + 1;
          mmv$reject_availmod_relink_stat [3].relinked := mmv$reject_availmod_relink_stat [3].relinked + 1;
        ELSEIF NOT reject_avail_mod_q_max_reached THEN
          mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
          mmv$aging_statistics.remove_modified_page_from_ws :=
                mmv$aging_statistics.remove_modified_page_from_ws + 1;
          mcount := mcount + 1;
          mmv$reject_availmod_relink_stat [3].qflooded := mmv$reject_availmod_relink_stat [3].qflooded + 1;
          status := mmc$rps_avail_mod_queue_flooded;
        ELSE
          rcount := 0; { This page cannot be linked, so we can't count it.
          pte_p^.v := TRUE;
          mmv$reject_availmod_relink_stat [3].rejected := mmv$reject_availmod_relink_stat [3].rejected + 1;
          status := mmc$rps_avail_mod_page_reject;
        IFEND;
      ELSEIF mmv$no_memory_buffering THEN
        mmp$delete_pt_entry (pfti, TRUE);
        mmp$relink_page_frame (pfti, mmc$pq_free);
        mmv$aging_statistics.remove_unmodified_page_from_ws :=
              mmv$aging_statistics.remove_unmodified_page_from_ws + 1;
      ELSE
        mmp$relink_page_frame (pfti, mmc$pq_avail);
        mmv$aging_statistics.remove_unmodified_page_from_ws :=
              mmv$aging_statistics.remove_unmodified_page_from_ws + 1;
      IFEND;
    ELSE
      IF ok_to_relink_to_avail_modified () THEN
        mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
        IF ((mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon) <
              mmv$write_aged_out_pages) THEN
          io_id.specified := FALSE;
          mmp$write_page_to_disk (fde_p, pfti, ioc$write_page, io_id, mmv$multi_page_write, write_status);
        IFEND;
        mmv$aging_statistics.remove_modified_page_from_ws :=
              mmv$aging_statistics.remove_modified_page_from_ws + 1;
        mcount := 1;
        mmv$reject_availmod_relink_stat [4].relinked := mmv$reject_availmod_relink_stat [4].relinked + 1;
      ELSEIF NOT reject_avail_mod_q_max_reached THEN
        mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
        IF ((mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon) <
              mmv$write_aged_out_pages) THEN
          io_id.specified := FALSE;
          mmp$write_page_to_disk (fde_p, pfti, ioc$write_page, io_id, mmv$multi_page_write, write_status);
        IFEND;
        mmv$aging_statistics.remove_modified_page_from_ws :=
              mmv$aging_statistics.remove_modified_page_from_ws + 1;
        mcount := 1;
        mmv$reject_availmod_relink_stat [4].qflooded := mmv$reject_availmod_relink_stat [4].qflooded + 1;
        status := mmc$rps_avail_mod_queue_flooded;
      ELSE
        rcount := 0; { This page cannot be linked, so we can't count it.
        pte_p^.v := TRUE;
        mmv$reject_availmod_relink_stat [4].rejected := mmv$reject_availmod_relink_stat [4].rejected + 1;
        status := mmc$rps_avail_mod_page_reject;
      IFEND;
    IFEND;

  PROCEND mmp$remove_page_from_jws;
?? TITLE := 'MMP$REMOVE_PAGE_FROM_JOB' ??
?? EJECT ??

{------------------------------------------------------------------------------------
{
{ This procedure is called to remove a page from a job's working set. It is called
{ from mmp$mm_write_modified_pages (if the request is coming from detach file) to
{ relink unmodified jws pages to the available or free queues.
{
{------------------------------------------------------------------------------------
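{ Unlike mmp$remove_page_from_jws, no FDE is consulted here; the page is invalidated, purged
{ from the maps, and relinked according to the state of its modified bit.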

  VAR
    mmv$remove_page_model_number: [XDCL, #GATE] 0 .. 0ff(16) := 16(16);

  PROCEDURE [XDCL] mmp$remove_page_from_job
    (    pfti: mmt$page_frame_index);

    VAR
{     cst_p: ^ost$cpu_state_table,
      pte_p: ^ost$page_table_entry;

    pte_p := ^mmv$pt_p^ [mmv$pft_p^ [pfti].pti];

{Clear the valid bit in the page table entry for the page.

    pte_p^.v := FALSE;
    pte_p^.u := FALSE;
    mmp$sva_purge_one_page_map (mmv$pft_p^ [pfti].sva);
    mmv$pft_p^ [pfti].age := 0;
    mmv$pft_p^ [pfti].cyclic_age := 0;

{Remove the page from the JWS and put it in the new queue.

    IF NOT pte_p^.m THEN
      IF mmv$pft_p^ [pfti].active_io_count <> 0 THEN
        IF ok_to_relink_to_avail_modified () THEN
          mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
          mmv$reject_availmod_relink_stat [5].relinked := mmv$reject_availmod_relink_stat [5].relinked + 1;
        ELSEIF mmv$wait_on_avail_mod_q_full THEN
          mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
          mmv$reject_availmod_relink_stat [5].qflooded := mmv$reject_availmod_relink_stat [5].qflooded + 1;
        ELSE
          pte_p^.v := TRUE;
          mmv$reject_availmod_relink_stat [5].rejected := mmv$reject_availmod_relink_stat [5].rejected + 1;
        IFEND;
      ELSEIF mmv$no_memory_buffering THEN
        mmp$delete_pt_entry (pfti, TRUE);
        mmp$relink_page_frame (pfti, mmc$pq_free);
      ELSE
        mmp$relink_page_frame (pfti, mmc$pq_avail);
      IFEND;
    ELSE

{ The following is a workaround to prevent 830s and 825s from crashing.  Somehow they can end up with
{ the modified bit set on a page that was attached in read mode only.  Engineers are looking into
{ the problem.

{     mtp$cst_p (cst_p);
      IF mtf$cst_p()^.element_id.model_number <= mmv$remove_page_model_number THEN
        pte_p^.m := FALSE;
        IF mmv$pft_p^ [pfti].active_io_count <> 0 THEN
          IF ok_to_relink_to_avail_modified () THEN
            mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
            mmv$reject_availmod_relink_stat [6].relinked := mmv$reject_availmod_relink_stat [6].relinked + 1;
          ELSEIF mmv$wait_on_avail_mod_q_full THEN
            mmp$relink_page_frame (pfti, mmc$pq_avail_modified);
            mmv$reject_availmod_relink_stat [6].qflooded := mmv$reject_availmod_relink_stat [6].qflooded + 1;
          ELSE
            pte_p^.v := TRUE;
            mmv$reject_availmod_relink_stat [6].rejected := mmv$reject_availmod_relink_stat [6].rejected + 1;
          IFEND;
        ELSEIF mmv$no_memory_buffering THEN
          mmp$delete_pt_entry (pfti, TRUE);
          mmp$relink_page_frame (pfti, mmc$pq_free);
        ELSE
          mmp$relink_page_frame (pfti, mmc$pq_avail);
        IFEND;
        dpp$display_error ('INFORMATIVE: PAGE FOUND MODIFIED ON REMOVE');
        mtp$store_informative_message ('PAGE FOUND MODIFIED ON REMOVE');
      ELSE
        mtp$error_stop ('PAGE FOUND MODIFIED ON REMOVE');
      IFEND;
    IFEND;
  PROCEND mmp$remove_page_from_job;

?? TITLE := 'CHECK_FREE_QUEUES' ??
?? EJECT ??

{----------------------------------------------------------------------
{
{This procedure is called after processing a request to determine if
{the free queues need to be replenished. If so, a flag is set to cause
{CP Monitor to call Memory Manager.
{
{---------------------------------------------------------------------
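{ For illustration of the thresholds used below: dropping below mmv$aggressive_aging_level_2
{ flags thrashing; at or below mmv$aggressive_aging_level the call-Memory-Manager timers are
{ zeroed, and strictly below that level an asynchronous interrupt is also requested.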

  PROCEDURE [INLINE] check_free_queues
    (    cst_p: ^ost$cpu_state_table);

    VAR
      count: integer;

    count := mmv$reassignable_page_frames.now + mmv$reassignable_page_frames.soon;
    IF count < mmv$aggressive_aging_level_2 THEN
      jmp$recognize_thrashing;
    IFEND;
    IF count <= mmv$aggressive_aging_level THEN
      IF count < mmv$aggressive_aging_level THEN
        cst_p^.dispatch_control.asynchronous_interrupts_pending := TRUE;
      IFEND;
      mmv$time_to_call_mem_mgr := 0;
      osv$time_to_check_asyn := 0;
      mmv$aging_statistics.force_aggressive_aging := mmv$aging_statistics.force_aggressive_aging + 1;
    IFEND;

  PROCEND check_free_queues;

?? TITLE := 'MMP$AGE_JOB_WORKING_SET', EJECT ??

{--------------------------------------------------------------------------------------------------------
{  This routine scans the page frames in the working set of a
{  job, updates the page ages, clears the page table 'USED' bits,
{  and removes unused pages from the working set of the job.
{--------------------------------------------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$age_job_working_set
    (    ijle_p: ^jmt$initiated_job_list_entry;
         jcb_p: ^jmt$job_control_block;
         reject_avail_mod_q_max_reached: boolean;
     VAR avail_mod_queue_overrun: boolean);


    VAR
      fde_p: gft$file_desc_entry_p,
      pqle_p: ^mmt$page_queue_list_entry,
      i: integer,
      cptime: integer,
      pfti: mmt$page_frame_index,
      perf: mmt$link,
      link: mmt$link,
      lu_link: mmt$link,
      pfte_p: ^mmt$page_frame_table_entry,
      lu_pfte_p: ^mmt$page_frame_table_entry,
      fcount: integer,
      mcount: integer,
      rcount: integer,
      aii: integer,
      aic: integer,
      aif: integer;

    IF mmv$aging_algorithm >= 4 THEN
      cptime := ijle_p^.statistics.cp_time.time_spent_in_job_mode;
    ELSE
      cptime := ijle_p^.statistics.cp_time.time_spent_in_mtr_mode +
            ijle_p^.statistics.cp_time.time_spent_in_job_mode;
    IFEND;
    aii := (cptime - jcb_p^.cptime_next_age_working_set + jcb_p^.page_aging_interval) DIV
          jcb_p^.page_aging_interval;
    mmv$aging_statistics.calls_to_age_jws := mmv$aging_statistics.calls_to_age_jws + 1;
    IF (aii < 1) THEN
      aii := 1;
    IFEND;
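{ For illustration (hypothetical values): with page_aging_interval = 100 and cptime 250 units past
{ cptime_next_age_working_set, aii = (250 + 100) DIV 100 = 3 aging intervals; the test above keeps
{ aii at least 1.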
    jcb_p^.cptime_next_age_working_set := cptime + jcb_p^.page_aging_interval;
    pqle_p := ^ijle_p^.job_page_queue_list [mmc$pq_job_working_set];

{ Calculate the values of AIC and AIF to be used in processing this request.
    aic := mmv$age_interval_ceiling;
    aif := mmv$age_interval_floor;


{ Age the job working set and relink the page frames into LRU order.
    mmp$reset_store_pfti;
    lu_link.bkw := 0;
    link.bkw := pqle_p^.link.bkw;
    rcount := ijle_p^.job_page_queue_list [mmc$pq_job_working_set].count - jcb_p^.min_working_set_size;

    WHILE (link.bkw <> 0) AND (rcount > 0) DO
      pfte_p := ^mmv$pft_p^ [link.bkw];
      pfti := link.bkw;
      link := pfte_p^.link;
      IF mmv$pt_p^ [pfte_p^.pti].u THEN
        mmv$pt_p^ [pfte_p^.pti].u := FALSE;
        pfte_p^.age := 0;
        pfte_p^.cyclic_age := 0;
        IF lu_link.bkw = 0 THEN
          lu_pfte_p := pfte_p;
          lu_link.fwd := link.fwd;
          lu_link.bkw := pfti;
        IFEND;
      ELSEIF pfte_p^.locked_page <> mmc$lp_not_locked THEN

{ Do nothing

      ELSEIF ((pfte_p^.age + aii) > aic) THEN
        mmp$store_pfti (pfti);
        rcount := rcount - 1;
      ELSEIF ((pfte_p^.age + aii) > aif) THEN
        mmp$store_pfti (pfti);
        rcount := rcount - 1;
        mmv$aging_statistics.age_exceeds_aif := mmv$aging_statistics.age_exceeds_aif + 1;
        aif := 65536; {Only remove one page for age > AIF}
      ELSE
        pfte_p^.age := pfte_p^.age + aii;
        IF (lu_link.bkw <> 0) THEN
          IF link.bkw = 0 THEN
            pqle_p^.link.fwd := link.fwd;
          ELSE
            mmv$pft_p^ [link.bkw].link.fwd := link.fwd;
          IFEND;
          mmv$pft_p^ [link.fwd].link.bkw := link.bkw;
          pfte_p^.link := lu_link;
          IF lu_link.fwd = 0 THEN
            pqle_p^.link.bkw := pfti;
          ELSE
            mmv$pft_p^ [lu_link.fwd].link.bkw := pfti;
          IFEND;
          lu_pfte_p^.link.fwd := pfti;
          lu_link.fwd := pfti;
        IFEND;
      IFEND;
    WHILEND;


{ If any pages have been selected for removal, remove the pages from the working set.

    mmp$fetch_pfti_array_size (rcount);
    mcount := 0;
    avail_mod_queue_overrun := FALSE;
    IF rcount > 0 THEN
      mmp$remove_pages_from_jws (mmc$pq_avail_modified, ijle_p, reject_avail_mod_q_max_reached, fcount,
            mcount, rcount);
      avail_mod_queue_overrun := fcount > 0;
      mmv$aging_statistics.age_exceeds_aic := mmv$aging_statistics.age_exceeds_aic + rcount;
    ELSE
      avail_mod_queue_overrun := FALSE;
      mmp$purge_all_page_map;
    IFEND;

  PROCEND mmp$age_job_working_set;
?? TITLE := 'MMP$REMOVE_STALE_PAGES', EJECT ??

{--------------------------------------------------------------------------------------------------------
{
{ This procedure is called to remove stale pages from a page queue. A stale page is defined as a page
{ that has a 'SWAP_COUNT' (field should be renamed) greater than or equal to the value specified by the caller.
{
{ This procedure does the following:
{    . Scan each page in the page queue
{    . If the 'u' bit in the page table is set,
{         clear it if aging the shared queue (if aging because of swapping, don't clear it - this would
{              defeat the page aging algorithms).
{    . ELSE if the swap count < stale count, increment swap count
{    . ELSE remove the page from the page queue (queue_id is passed from the caller to indicate if page goes
{         to AM or JWS queue (used for swap aging))
{
{ This procedure is intended to be used to:
{    . age the shared queue
{    . provide SWAPPING_AIC aging of job working sets prior to swap.
{
{ This procedure does NOT keep the page queue in a LRU order.
{
{--------------------------------------------------------------------------------------------------------
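{ For illustration: with aic_unmodified = 2, an unreferenced unmodified page survives two scans
{ (cyclic_age 0 -> 1 -> 2) and is selected for removal on the next scan.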


  PROCEDURE [XDCL] mmp$remove_stale_pages
    (VAR pqle: mmt$page_queue_list_entry;
         aic_modified: integer;
         aic_unmodified: integer;
         jcb_p: ^jmt$job_control_block;
         ijle_p: ^jmt$initiated_job_list_entry;
         queue_id: mmt$page_frame_queue_id;
         minimum_working_set: mmt$page_frame_index;
     VAR modified_pages_removed: integer;
     VAR total_pages_removed: integer;
     VAR total_pages_scanned: integer);

    VAR
      cptime: integer,
      eoi: ost$segment_length,
      lstatus: syt$monitor_status,
      rcount: integer,
      ignore_pages_overrun_avail_mod: integer,
      pfti: mmt$page_frame_index,
      next_pfti: mmt$page_frame_index,
      mmv$age_not_pageable: [XDCL] integer := 0,
      pfte_p: ^mmt$page_frame_table_entry;

    total_pages_scanned := 0;
    IF jcb_p <> NIL THEN
      jcb_p^.next_cyclic_aging_time := #FREE_RUNNING_CLOCK (0) + jcb_p^.cyclic_aging_interval;
      IF mmv$aging_algorithm >= 4 THEN
        cptime := ijle_p^.statistics.cp_time.time_spent_in_job_mode;
      ELSE
        cptime := ijle_p^.statistics.cp_time.time_spent_in_mtr_mode +
              ijle_p^.statistics.cp_time.time_spent_in_job_mode;
      IFEND;
      jcb_p^.cptime_next_age_working_set := cptime + jcb_p^.page_aging_interval;
    IFEND;

    mmp$reset_store_pfti;
    pfti := pqle.link.bkw;
    rcount := pqle.count - minimum_working_set;

    WHILE (pfti <> 0) AND (rcount > 0) DO
      total_pages_scanned := total_pages_scanned + 1;
      pfte_p := ^mmv$pft_p^ [pfti];
      next_pfti := pfte_p^.link.bkw;
      IF mmv$pt_p^ [pfte_p^.pti].u AND NOT jsv$free_working_set_on_swapout THEN
        mmv$pt_p^ [pfte_p^.pti].u := FALSE;
        pfte_p^.cyclic_age := 0;
        pfte_p^.age := 0;
      ELSEIF ((mmv$pt_p^ [pfte_p^.pti].m = TRUE) AND (pfte_p^.cyclic_age < aic_modified))
{     } OR ((mmv$pt_p^ [pfte_p^.pti].m = FALSE) AND (pfte_p^.cyclic_age < aic_unmodified)) THEN
        pfte_p^.cyclic_age := pfte_p^.cyclic_age + 1;
      ELSEIF pfte_p^.locked_page <> mmc$lp_not_locked THEN

{ Do nothing

      ELSE
        mmp$store_pfti (pfti);
        rcount := rcount - 1;
      IFEND;

      pfti := next_pfti;
    WHILEND;


{ If any pages have been selected for removal, remove the pages from the working set.

    mmp$fetch_pfti_array_size (rcount);
    IF rcount > 0 THEN
      mmp$remove_pages_from_jws (queue_id, ijle_p, FALSE {= overrun Q} , ignore_pages_overrun_avail_mod,
            modified_pages_removed, total_pages_removed);
    ELSE
      total_pages_removed := 0;
      modified_pages_removed := 0;
      IF queue_id = mmc$pq_avail_modified THEN
        mmp$purge_all_page_map;
      IFEND;
    IFEND;


  PROCEND mmp$remove_stale_pages;
?? TITLE := 'MMP$TRIM_JOB_WORKING_SET', EJECT ??

{--------------------------------------------------------------------------------------------------
{ This procedure is called to trim a job working set.
{ If the size of the working set exceeds the max allowed, pages are removed until the size is ok.
{--------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$trim_job_working_set
    (    ijle_p: ^jmt$initiated_job_list_entry;
         jcb_p: ^jmt$job_control_block;
         trim_to_swap_size: boolean;
     VAR avail_mod_queue_overrun: boolean);

    VAR
      last_pfti: mmt$page_frame_index,
      maximum_pages_to_swap: integer,
      mcount: integer,
      page_skip: boolean,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      pte_p: ^ost$page_table_entry,
      rcount: integer,
      relink_status: mmt$relink_page_status,
      skip_count: integer,
      smallest_maximum_working_set: integer;

    avail_mod_queue_overrun := FALSE;
    IF syv$recovering_job_count <> 0 THEN
      RETURN; {----->
    IFEND;

{ Skip working set adjustment if a segment lock is outstanding.
    IF ijle_p^.override_job_working_set_max > 0 THEN
      RETURN; {----->
    IFEND;

    IF (jcb_p^.max_working_set_size < mmv$max_working_set_size) THEN
      smallest_maximum_working_set := jcb_p^.max_working_set_size;
    ELSE
      smallest_maximum_working_set := mmv$max_working_set_size;
    IFEND;

    IF trim_to_swap_size THEN
      IF ijle_p^.task_created_after_last_swap THEN
        maximum_pages_to_swap := jsv$max_pages_first_swap_task;
      ELSE
        maximum_pages_to_swap := jsv$maximum_pages_to_swap;
      IFEND;

      IF smallest_maximum_working_set > maximum_pages_to_swap THEN
        smallest_maximum_working_set := maximum_pages_to_swap;
      IFEND;
    ELSEIF ijle_p^.job_page_queue_list [mmc$pq_job_working_set].count <
          (smallest_maximum_working_set + 1) THEN
      RETURN; {----->
    IFEND;

    pfti := ijle_p^.job_page_queue_list [mmc$pq_job_working_set].link.bkw;
    relink_status := mmc$rps_page_relinked;

    WHILE (ijle_p^.job_page_queue_list [mmc$pq_job_working_set].count > smallest_maximum_working_set)
{   } AND (pfti <> 0)
{   } AND ((mmv$wait_on_avail_mod_q_full = FALSE) OR (relink_status <> mmc$rps_avail_mod_queue_flooded)) DO
      pfte_p := ^mmv$pft_p^ [pfti];
      last_pfti := pfti;
      pfti := pfte_p^.link.bkw;
      mmp$remove_page_from_jws (last_pfti, ijle_p, NOT mmv$wait_on_avail_mod_q_full, mcount, rcount,
            relink_status);
    WHILEND;
    avail_mod_queue_overrun := mmv$wait_on_avail_mod_q_full AND
          (relink_status = mmc$rps_avail_mod_queue_flooded);

  PROCEND mmp$trim_job_working_set;
?? TITLE := 'MMP$DUMP_SHARED_QUEUE' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{ This procedure is called to take pages out of the shared queue.  Pages will be removed until
{ mmv$reassignable_page_frames.now minus mmv$aggressive_aging_level_2 is greater than or equal to
{ the number of pages requested.
{
{ The removal of pages from the shared queues will be done in two passes.  On the first pass a number of
{ pages will be removed from each shared queue as determined by the minimum size attribute of the queue.
{ If the minimum is zero, then all of the pages in that queue will be removed during the first pass.
{ If the first pass does not remove enough pages, then another pass will be made during which all pages
{ can be removed if necessary.  The passes are terminated early whenever enough pages have been removed.
{--------------------------------------------------------------------------------------------------------
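{ For illustration: a shared queue with count = 12 and minimum_nominal = 8 contributes at most
{ 4 pages on the first pass; on the second pass its remaining pages may also be taken.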

  PROCEDURE [XDCL] mmp$dump_shared_queue
    (    total_pages_needed: mmt$page_frame_index);

    VAR
      ignore_relink_status: mmt$relink_page_status,
      modified_pages_removed: integer,
      next_pfti: mmt$page_frame_index,
      pages_from_queue: integer,
      pages_removed: integer,
      pfti: mmt$page_frame_index,
      reduce_queue_below_minimum: boolean,
      queue_id: mmt$page_frame_queue_id;

    reduce_queue_below_minimum := FALSE;

  /two_passes/
    WHILE 1 = 1 DO
      FOR queue_id := mmv$last_active_shared_queue DOWNTO mmc$pq_shared_first DO
        pfti := mmv$gpql [queue_id].pqle.link.bkw;
        IF reduce_queue_below_minimum THEN
          pages_from_queue := mmv$gpql [queue_id].pqle.count;
        ELSE
          pages_from_queue := mmv$gpql [queue_id].pqle.count - mmv$gpql [queue_id].minimum_nominal;
        IFEND;

{ If the count of pages in the queue is less than or equal to the minimum, one of the pages will be
{ removed on the first pass.  If there are no pages in the queue, pfti will be zero.

      /dump_a_queue/
        WHILE pfti <> 0 DO
          next_pfti := mmv$pft_p^ [pfti].link.bkw;
          mmp$remove_page_from_jws (pfti, NIL, TRUE {= reject when Avail Mod Q Max} , modified_pages_removed,
                pages_removed, ignore_relink_status);

{We ignore the relink status and so may not remove "enough" pages from that particular Q. In doing this,
{we tend to free more unmodified pages, which is better in the current situation anyway, as the
{Avail Mod Q is already flooded.

          pfti := next_pfti;
          pages_from_queue := pages_from_queue - 1;
          IF pages_from_queue <= 0 THEN
            EXIT /dump_a_queue/ {----->
          IFEND;
          IF (mmv$reassignable_page_frames.now - mmv$aggressive_aging_level_2) >= total_pages_needed THEN
            EXIT /two_passes/ {Terminate both passes since the pages needed are available.
          IFEND;
        WHILEND /dump_a_queue/;
      FOREND;

      IF reduce_queue_below_minimum THEN
        EXIT /two_passes/; { All the shared queues have been dumped, including the minimums.
      IFEND;
      reduce_queue_below_minimum := TRUE; {Allow queues to be reduced below the minimum size on pass 2.
    WHILEND /two_passes/;


  PROCEND mmp$dump_shared_queue;

?? TITLE := 'MMP$ASSIGN_PAGE_FRAME' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{ This procedure is called to assign a new page frame to a segment.
{ The routine performs the following steps:
{    . obtain a free page frame.
{    . delete the PT entry using the page frame (if necessary).
{    . make a new PT entry for the page.
{    . update the PFT entry for the page frame and the AST entry for the seg.
{
{      MMP$ASSIGN_PAGE_FRAME (SVA, ASTE_P, NUMBER_OF_PAGES_TO_ASSIGN, STARTING_PFTI,
{         ASSIGNED_PAGE_COUNT, FIRST_PFTI, PSTATUS);
{
{  SVA: (INPUT) SVA that identifies page
{  ASTE_P: (INPUT) Pointer to AST table entry for the segment
{  NUMBER_OF_PAGES_TO_ASSIGN: (INPUT) This parameter specifies how many pages
{       the caller wants assigned.
{  STARTING_PFTI: (INPUT) This parameter specifies the pfti where page assignment is to begin.
{       This parameter will be non-zero only if the request for page assignment is coming
{       from the ASSIGN_CONTIGUOUS_MEMORY request.
{  ASSIGNED_PAGE_COUNT: (OUTPUT) Number of pages actually assigned. May be less than requested if a
{       page already exists in the specified range.
{  FIRST_PFTI: (OUTPUT) Page Frame Table index of first page frame assigned.  If more
{       than one page assigned the other pages are linked through the
{       backward link in the page frame table entry.
{  PSTATUS: (OUTPUT) Status
{           ps_done - if all pages were assigned
{           ps_no_memory - if insufficient memory is available to assign ALL requested
{           ps_pt_full - if page table full. Some pages may have been assigned before page table full
{              occurred; ASSIGNED_PAGE_COUNT will indicate how many pages were assigned.
{           ps_valid_in_pt - if a page is in PT. Some pages may have been assigned before valid in page
{              table occurred; ASSIGNED_PAGE_COUNT will indicate how many pages were assigned.
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$assign_page_frame
    (    sva: ost$system_virtual_address;
         aste_p: ^mmt$active_segment_table_entry;
         number_of_pages_to_assign: mmt$page_frame_index;
         starting_pfti: mmt$page_frame_index;
     VAR assigned_page_count: mmt$page_frame_index;
     VAR first_pfti: mmt$page_frame_index;
     VAR pstatus: mmt$page_pull_status);

    VAR
      assign_page_loop_count: mmt$page_frame_index,
      mpt_status: mmt$make_pt_entry_status,
      page_sva: ost$system_virtual_address,
      pfti: mmt$page_frame_index,
      pfte_p: ^mmt$page_frame_table_entry,
      temp_offset: integer;

    assigned_page_count := 0;
    first_pfti := starting_pfti;
    IF first_pfti <> 0 THEN
      pfti := starting_pfti - 1;
    IFEND;
    IF number_of_pages_to_assign > mmv$reassignable_page_frames.now THEN
      pstatus := ps_no_memory;
      RETURN; {----->
    IFEND;

    pstatus := ps_done;
    page_sva := sva;
    assign_page_loop_count := number_of_pages_to_assign;

    WHILE assign_page_loop_count > 0 DO

{Get an available page frame to use for the new page.  Return an error code if no memory is available.

      IF starting_pfti = 0 THEN
        mmp$get_avail_page_frame (pfti);
        IF pfti = 0 THEN
          pstatus := ps_no_memory;
          RETURN; {----->
        IFEND;
      ELSE

{The non-zero starting_pfti indicates that the request to assign page frames is
{coming from an ASSIGN_CONTIGUOUS_MEMORY request. That request has verified
{that the page frames from (starting_pfti-->number_pages_to_assign) are available.

        pfti := pfti + 1;
      IFEND;
      pfte_p := ^mmv$pft_p^ [pfti];


{Make a PT entry for the new page. If page table was full, link the page frame back to the free queue.

      mmp$make_pt_entry (page_sva, pfti, aste_p, pfte_p, mpt_status);
      IF mpt_status <> mmc$mpt_done THEN
        mmp$relink_page_frame (pfti, mmc$pq_free);
        IF mpt_status = mmc$mpt_page_table_full THEN
          mmv$async_work.pt_full_aste_p := aste_p;
          mmv$async_work.pt_full_sva := page_sva;
          mmv$async_work.pt_full := TRUE;
          mmv$time_to_call_mem_mgr := 0;
          osv$time_to_check_asyn := 0;
          pstatus := ps_pt_full;
        ELSE {must be valid in PT - make sure no other statuses}
          pstatus := ps_valid_in_pt;
        IFEND;
        RETURN; {----->
      IFEND;


{Update the page frame table entry for the new entry.
      IF pfte_p^.task_queue.head <> 0 THEN
        mtp$error_stop ('MM - reassigned PF with task queue');
      IFEND;
      pfte_p^.age := 0;
      pfte_p^.cyclic_age := 0;
      pfte_p^.io_error := ioc$no_error;
      pfte_p^.sva := page_sva;
      pfte_p^.aste_p := aste_p;
      pfte_p^.locked_page := mmc$lp_not_locked;
      pfte_p^.ijl_ordinal := aste_p^.ijl_ordinal;

{Link the page frame into the new queue.
      mmp$relink_page_frame (pfti, aste_p^.queue_id);

      IF first_pfti = 0 THEN
        first_pfti := pfti;
      IFEND;

      assigned_page_count := assigned_page_count + 1;
      assign_page_loop_count := assign_page_loop_count - 1;
      IF assign_page_loop_count > 0 THEN
        temp_offset := page_sva.offset + osv$page_size;
        IF temp_offset > osc$maximum_offset THEN
          assign_page_loop_count := 0;
        ELSE
          page_sva.offset := temp_offset;
        IFEND;
      IFEND;
    WHILEND;

  PROCEND mmp$assign_page_frame;
?? TITLE := 'MMP$PAGE_PULL' ??
?? EJECT ??

{--------------------------------------------------------------------------------------------------------
{ This procedure is called to add a page of a segment to the address space of the current user task.
{ Mmp$page_pull_hash_sva MUST have been called before calling this procedure to verify that the page
{ is not already in memory (in the page table).
{ If this is a multiple page request, it is only known that the first page is not in memory.
{ Read_pages_from_disk_or_server or mmp$assign_page_frame will stop assigning new pages when an existing
{ page is encountered.  Both procedures return the actual number of new pages that were assigned.
{
{ When mmp$page_pull is called by the file server procedure, process_read_for_server, both the
{ cst_p and the stxe_p will be NIL.
{
{  All possible page pull status values MUST be in the CASE statement in the calling procedure, pr_pf.
{  The page pull status type is listed here to aid readability of the page fault path.
{     mmt$page_pull_status = (ps_done, ps_found_in_avail, ps_found_in_avail_modified, ps_valid_in_pt,
{     ps_no_memory, ps_low_on_memory, ps_locked, ps_found_on_disk, ps_pt_full, ps_io_temp_reject,
{     ps_new_page_assigned, ps_beyond_file_limit, ps_read_beyond_eoi, ps_no_extend_permission,
{     ps_volume_unavailable, ps_found_on_server, ps_allocate_required_on_server, ps_server_terminated,
{     ps_job_work_required, ps_runaway_write);
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$page_pull
    (    xsva: ost$system_virtual_address;
         fde_p: gft$locked_file_desc_entry_p;
         cst_p: ^ost$cpu_state_table;
         aste_p: ^mmt$active_segment_table_entry;
         stxe_p: ^mmt$segment_descriptor_extended;
         io_id: mmt$io_identifier;
         pages_to_read: integer;
         io_function: iot$io_function;
         allocate_if_new: boolean;
     VAR page_count: mmt$page_frame_index;
     VAR pstatus: mmt$page_pull_status;
     VAR pfti: mmt$page_frame_index);

    VAR
      active_au_offset: integer,
      assigned_page_count: mmt$page_frame_index,
      buffer_descriptor: mmt$buffer_descriptor,
      bytes_to_read: integer,
      file_kind: gft$file_kind,
      file_limits_enforced: sft$file_space_limit_kind,
      ijlo: jmt$ijl_ordinal,
      low_on_page_frames: boolean,
      next_pfti: mmt$page_frame_index,
      nil_cst: boolean,
      page_status: gft$page_status,
      pages_to_allocate: integer,
      passive_fde_p: gft$locked_file_desc_entry_p,
      shadow_au_offset: integer,
      status: syt$monitor_status,
      sva: ost$system_virtual_address,
      update_eoi_reason: mmt$update_eoi_reason,
      xcb_p: ^ost$execution_control_block;

?? NEWTITLE := 'READ_PAGES_FROM_DISK_OR_SERVER', EJECT ??

    PROCEDURE [INLINE] read_pages_from_disk_or_server
      (    io_function: iot$io_function;
           fde_p: gft$locked_file_desc_entry_p;
           sva: ost$system_virtual_address;
           page_count: mmt$page_frame_index;
           aste_p: ^mmt$active_segment_table_entry;
           file_offset: integer;
           io_id: mmt$io_identifier;
           all_requested_needed: boolean;
       VAR assigned_page_count: mmt$page_frame_index;
       VAR pstatus: mmt$page_pull_status;
       VAR pfti: mmt$page_frame_index);

      VAR
        buffer_descriptor: mmt$buffer_descriptor,
        next_pfti: mmt$page_frame_index,
        status: syt$monitor_status;

{ Assign the page frame for the incoming page. Assigned_page_count is initialized in mmp$assign_page_frame.
      mmp$assign_page_frame (sva, aste_p, page_count, 0, assigned_page_count, pfti, pstatus);

      IF (assigned_page_count > 0) AND ((assigned_page_count = page_count) OR NOT all_requested_needed) THEN
        buffer_descriptor.buffer_descriptor_type := mmc$bd_paging_io;
        buffer_descriptor.sva := sva;
        buffer_descriptor.page_count := assigned_page_count;
        IF fde_p^.media = gfc$fm_mass_storage_file THEN
          iop$pager_io (fde_p, file_offset, buffer_descriptor, assigned_page_count * osv$page_size,
                io_function, io_id, status);
          pstatus := ps_found_on_disk;

        ELSE
          dfp$server_io (fde_p, ioc$read_page, file_offset, assigned_page_count * osv$page_size, io_id,
                buffer_descriptor, status);
          pstatus := ps_found_on_server;
        IFEND;
        IF status.normal THEN
          RETURN; {----->
        ELSEIF (status.condition = dme$transient_error) OR (status.condition = ioe$requests_full) THEN
          pstatus := ps_io_temp_reject;
        ELSEIF status.condition = ioe$unit_disabled THEN
          pstatus := ps_volume_unavailable;
        ELSEIF status.condition = dfe$server_has_terminated THEN
          pstatus := ps_server_terminated;
        ELSE
          mtp$error_stop ('MM - unexpected phy io error');
        IFEND;
      IFEND;

{ Not enough frames.  Delete the page table entries for the ones just found.

      WHILE pfti <> 0 DO
        mmp$delete_pt_entry (pfti, TRUE);
        next_pfti := mmv$pft_p^ [pfti].link.bkw;
        mmp$relink_page_frame (pfti, mmc$pq_free);
        pfti := next_pfti;
      WHILEND;

    PROCEND read_pages_from_disk_or_server;
?? OLDTITLE ??
?? EJECT ??

    page_count := 0;
    pfti := 0;
    sva := xsva;
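{Align the faulting offset down to a page boundary.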
    sva.offset := (sva.offset DIV osv$page_size) * osv$page_size;

    xcb_p := NIL;
    nil_cst := cst_p = NIL;
    IF NOT nil_cst THEN
      xcb_p := cst_p^.xcb_p;
    IFEND;

{A new page frame is required. If the system is running low on memory and the requesting task is not a
{system task, reject the request. This will cause the user to be put in a WAIT state.
{For a served file fault, just check low on memory.

    low_on_page_frames := mmv$reassignable_page_frames.now < mmv$aggressive_aging_level_2;
    IF low_on_page_frames AND (nil_cst OR ((xcb_p^.system_table_lock_count < 256) AND
          NOT xcb_p^.critical_task)) THEN
      pstatus := ps_low_on_memory;
      RETURN; {----->
    IFEND;

{ Check for reference beyond EOI if user does not have EXTEND permission.
    IF (sva.offset >= fde_p^.eoi_byte_address) AND (NOT nil_cst) AND
          ((stxe_p^.access_rights <> mmc$sar_write_extend) OR NOT allocate_if_new) THEN
      IF stxe_p^.access_rights = mmc$sar_modify THEN
        pstatus := ps_no_extend_permission;
      ELSE
        pstatus := ps_read_beyond_eoi;
      IFEND;
      RETURN; {----->
    IFEND;

{ Check for a write that goes beyond eoi by an unreasonable amount.

    IF (sva.offset > fde_p^.eoi_byte_address + mmv$maximum_write_span) THEN
      IF (NOT nil_cst) AND (stxe_p^.access_rights = mmc$sar_write_extend) THEN
{       IF stxe_p^.software_attribute_set *
{             $mmt$software_attribute_set [mmc$sa_wired, mmc$sa_fixed,
{             mmc$sa_stack] = $mmt$software_attribute_set [] THEN
{         pstatus := ps_runaway_write;
{         RETURN; {----->
{       IFEND
        IF ((stxe_p^.software_attribute_set = $mmt$software_attribute_set [mmc$sa_free_behind]) OR
              (stxe_p^.software_attribute_set = $mmt$software_attribute_set [mmc$sa_no_append]) OR
              (stxe_p^.software_attribute_set = $mmt$software_attribute_set [mmc$sa_job_shared]) OR
              (stxe_p^.software_attribute_set = $mmt$software_attribute_set [mmc$sa_read_transfer_unit])) THEN
          pstatus := ps_runaway_write;
          RETURN; {----->
        IFEND;
      IFEND;
    IFEND;

{ Check for reference beyond file limit. Note: during deadstart, a reference beyond
{ EOI may be for the memory resident portion of the old image file.

    IF sva.offset >= fde_p^.file_limit THEN
      IF (mmv$image_file.active) AND (aste_p^.sfid = mmv$image_file.sfid) THEN
        process_memory_image_pf (sva, aste_p, pfti, pstatus);
        page_count := 1;
      ELSE
        pstatus := ps_beyond_file_limit;
      IFEND;
      RETURN; {----->
    IFEND;


{ Determine limits options. Served files always have a NIL cst_p and require no
{ limits checking.

    IF nil_cst THEN
      ijlo := jmv$null_ijl_ordinal;
      file_limits_enforced := sfc$no_limit;
    ELSE
      ijlo := cst_p^.ijl_ordinal;
      file_limits_enforced := stxe_p^.file_limits_enforced;
    IFEND;


{ Determine the status/location of the page.

    CASE fde_p^.media OF
    = gfc$fm_transient_segment =
      IF (aste_p^.pages_in_memory > mmv$max_pages_no_file) AND
            NOT ((aste_p^.queue_id = mmc$pq_wired) OR (aste_p^.queue_id = mmc$pq_job_fixed)) THEN
        set_assign_active (stxe_p, sva.offset);
        tmp$set_monitor_flag (cst_p^.taskid, mmc$mf_segment_mgr_flag, status);
      IFEND;
      page_status := gfc$ps_page_doesnt_exist;
    = gfc$fm_mass_storage_file =
      IF NOT nil_cst THEN
        mmv$last_segment_accessed := (#OFFSET (#LOC (stxe_p^)) - xcb_p^.sdtx_offset) DIV
              #SIZE (mmt$segment_descriptor_extended);
      IFEND;
      dmp$fetch_page_status (fde_p, sva.offset, file_limits_enforced, allocate_if_new, page_status);
    = gfc$fm_served_file =
      dfp$fetch_page_status (fde_p, sva.offset, page_status);
    ELSE
      mtp$error_stop ('MM - bad FDE.MEDIA');
    CASEND;


{ If job mode work is required but the task is in some state where it is not advisable
{ to interrupt it, allow escaped allocation to occur. Otherwise reject the page fault
{ and let the task fix the problem in job mode before assigning the page.

    IF (page_status = gfc$ps_job_mode_work_required) AND (NOT nil_cst) THEN
      set_assign_active (stxe_p, sva.offset);
      IF (xcb_p^.system_table_lock_count > 255) AND (xcb_p^.xp.p_register.pva.ring > 1) THEN
        xcb_p^.stlc_allocation := TRUE;
      ELSE
        tmp$set_monitor_flag (cst_p^.taskid, mmc$mf_segment_mgr_flag, status);
      IFEND;
      IF (xcb_p^.xp.trap_enable <> osc$traps_enabled) OR (xcb_p^.xp.p_register.pva.ring = 1) OR
            (osc$trap_exception IN xcb_p^.xp.monitor_condition_register) OR (fde_p^.stack_for_ring <> 0) OR
            (xcb_p^.system_table_lock_count > 255) THEN
        page_status := gfc$ps_page_doesnt_exist;
      IFEND;
    IFEND;

{ If temp reject is indicated for one of the Device Manager tasks, allow
{ escaped allocation since blocking them might cause a system deadlock.

    IF (page_status = gfc$ps_temp_reject) AND (xcb_p <> NIL) AND
          ((xcb_p^.system_task_id = tmc$stid_administer_log) OR
          (xcb_p^.system_task_id = tmc$stid_dm_split_al) OR (xcb_p^.system_task_id =
          tmc$stid_volume_space_managemnt)) THEN
      page_status := gfc$ps_page_doesnt_exist;
    IFEND;


{Process page fault depending on the location of the page.

    CASE page_status OF
    = gfc$ps_page_on_disk, gfc$ps_page_on_server =
      IF NOT low_on_page_frames THEN
        bytes_to_read := (fde_p^.allocation_unit_size - (sva.offset MOD fde_p^.allocation_unit_size));
        IF sva.offset + bytes_to_read > fde_p^.eoi_byte_address THEN
          bytes_to_read := fde_p^.eoi_byte_address - sva.offset + osv$page_size - 1;
        IFEND;
        page_count := bytes_to_read DIV osv$page_size;
        IF page_count > pages_to_read THEN
          page_count := pages_to_read;
        IFEND;
      ELSE
        page_count := 1;
      IFEND;
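{ For illustration (hypothetical sizes): with a 16384-byte allocation unit, a 4096-byte page, and a
{ fault at offset 4096, bytes_to_read = 16384 - 4096 = 12288, giving page_count = 3, further capped
{ by pages_to_read and by the EOI adjustment above.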

      read_pages_from_disk_or_server (io_function, fde_p, sva, page_count, aste_p, sva.offset, io_id, FALSE,
            assigned_page_count, pstatus, pfti);
      page_count := assigned_page_count;

    = gfc$ps_job_mode_work_required =
      mmp$update_eoi (fde_p, sva.offset, mmc$uer_page_assigned); {Needed so job mode knows adr to allocate}
      pstatus := ps_job_work_required;

    = gfc$ps_volume_unavailable =
      IF fde_p^.flags.wire_eoi_page AND (sva.offset >= fde_p^.eoi_byte_address) THEN
        mmp$assign_page_frame (sva, aste_p, 1, 0, page_count, pfti, pstatus);
        IF pstatus = ps_done THEN
          mmp$preset_real_memory (sva, fde_p^.preset_value);
          mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := TRUE;
          pstatus := ps_new_page_assigned;
          mmp$update_eoi (fde_p, sva.offset, mmc$uer_page_assigned);
        IFEND;
      ELSE
        pstatus := ps_volume_unavailable;
      IFEND;

    = gfc$ps_server_allocate_required =

      mmp$assign_page_frame (sva, aste_p, 1, 0, page_count, pfti, pstatus);
      IF pstatus = ps_done THEN
        mmp$preset_real_memory (sva, fde_p^.preset_value);
        pstatus := ps_allocate_required_on_server;
        IF pages_to_read < mmv$pages_for_overallocation THEN
          pages_to_allocate := mmv$pages_for_overallocation;
        ELSE
          pages_to_allocate := pages_to_read;
        IFEND;
        buffer_descriptor.buffer_descriptor_type := mmc$bd_paging_io;
        buffer_descriptor.sva := sva;
        buffer_descriptor.page_count := page_count;
        dfp$file_server_allocation (aste_p^.sfid, sva.offset, (pages_to_allocate * osv$page_size) +
              sva.offset, io_id, buffer_descriptor, stxe_p^.file_limits_enforced, status);
        IF status.normal THEN
          mmp$update_eoi (fde_p, sva.offset, mmc$uer_page_assigned);
          RETURN; {<----}
        ELSEIF (status.condition = dme$transient_error) OR (status.condition = ioe$requests_full) THEN
          pstatus := ps_io_temp_reject;
        ELSEIF status.condition = ioe$unit_disabled THEN
          pstatus := ps_volume_unavailable;
        ELSEIF status.condition = dfe$server_has_terminated THEN
          pstatus := ps_server_terminated;
        ELSE
          mtp$error_stop ('MM - unexpected phy io error');
        IFEND;
        mmp$delete_pt_entry (pfti, TRUE);
        mmp$relink_page_frame (pfti, mmc$pq_free);
      IFEND;

    = gfc$ps_temp_reject, gfc$ps_account_limit_exceeded =
      pstatus := ps_io_temp_reject;

    = gfc$ps_server_terminated =
      pstatus := ps_server_terminated;

    = gfc$ps_page_doesnt_exist =

{ For a page not found on the server, the file cannot be shadowed.  This applies when
{ the active file is on the server.
{ The pointer to the CPU_STATE_TABLE will always be non-nil on the client side of file_server.

      IF (stxe_p = NIL) OR (stxe_p^.shadow_info.shadow_segment_kind = mmc$ssk_none) OR
            (sva.offset >= (stxe_p^.shadow_info.shadow_length_page_count * osv$page_size)) THEN

{ Determine if multiple pages should be assigned for this "new page" page fault.
{ The numbers 32768 and 16384 are arbitrary.  Files smaller than 32768 are not as likely
{ to use the extra assigned pages.  Offset MOD 16384 is used because most allocation units
{ are 16384.  This fault would then probably be for the first page in the AU.

        pages_to_allocate := 1;

        IF (sva.offset >= 32768) AND ((sva.offset MOD 16384) = 0) AND
              (fde_p^.media <> gfc$fm_transient_segment) AND (fde_p^.file_kind <> gfc$fk_device_file) AND
              (mmv$reassignable_page_frames.now > mmv$assign_multiple_pages) AND
              (mmv$pages_per_new_page_fault > 1) AND

{ Check if assignment of extra pages will fit into allocated space.

        (fde_p^.allocation_unit_size - (sva.offset MOD fde_p^.allocation_unit_size) >=
              mmv$pages_per_new_page_fault * osv$page_size) THEN
          pages_to_allocate := mmv$pages_per_new_page_fault;
        IFEND;
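{ For illustration (hypothetical sizes): with 4096-byte pages, 16384-byte allocation units, and
{ mmv$pages_per_new_page_fault = 4, a fault at offset 49152 (3 * 16384) passes both tests and the
{ 16384 bytes remaining in the AU hold all 4 pages.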

        mmp$assign_page_frame (sva, aste_p, pages_to_allocate, 0, page_count, pfti, pstatus);

        IF page_count > 0 THEN
          pstatus := ps_new_page_assigned;

{ Update EOI for the 1st page assigned (the page faulted for).  Don't change EOI if this is a
{ read for server.  If the client is running with a larger page size, updating the EOI would be
{ wrong.  If the page is written on, EOI will be updated when the client sends it back.

          IF io_function <> ioc$read_for_server THEN
            IF page_count > 1 THEN
              update_eoi_reason := mmc$uer_multiple_pages_assigned;
            ELSE
              update_eoi_reason := mmc$uer_page_assigned;
            IFEND;
            mmp$update_eoi (fde_p, sva.offset, update_eoi_reason);
          ELSE
            fde_p^.eoi_state := mmc$eoi_uncertain;
          IFEND;

{ Preset memory if necessary.

          next_pfti := pfti;
          WHILE next_pfti <> 0 DO
            IF (fde_p^.stack_for_ring = 0) OR (fde_p^.stack_for_ring > 3) THEN
              mmp$preset_real_memory (sva, fde_p^.preset_value);
            IFEND;
            mmv$pt_p^ [mmv$pft_p^ [next_pfti].pti].v := TRUE;
            next_pfti := mmv$pft_p^ [next_pfti].link.bkw;
            IF next_pfti <> 0 THEN
              sva.offset := sva.offset + osv$page_size;
            IFEND;
          WHILEND;
        IFEND;

      ELSE

{ The page is shadowed by another file and the page resides on the shadow file.
{ If shadow is by segment number then assign pages for the transfer unit and set the 'm'
{ bit in the page table.  Put the source and destination pva into the xcb along with the
{ page count for the transfer.  Set the monitor flag, mmc$mf_shadow_file_reference, so that
{ mmp$mfh_shadow_file_reference will be called to copy the data.  Otherwise initiate I/O to
{ read the transfer unit containing the page from the shadow file and set the 'm' bit in the
{ page table entry for each page.

        active_au_offset := (sva.offset DIV fde_p^.allocation_unit_size) * fde_p^.allocation_unit_size;
        shadow_au_offset := (stxe_p^.shadow_info.shadow_start_page_number * osv$page_size) + active_au_offset;

        gfp$mtr_get_locked_fde_p (stxe_p^.shadow_info.shadow_sfid, cst_p^.ijle_p, passive_fde_p);
        sva.offset := active_au_offset;

        bytes_to_read := stxe_p^.shadow_info.shadow_length_page_count * osv$page_size - active_au_offset;
        IF bytes_to_read > fde_p^.allocation_unit_size THEN
          bytes_to_read := fde_p^.allocation_unit_size;
        IFEND;

        page_count := bytes_to_read DIV osv$page_size;

{ If the job is able to take a trap and copy the pages from the passive segment, send a
{ flag to the job to do this.

        IF (xcb_p^.xp.trap_enable = osc$traps_enabled) AND (xcb_p^.xp.p_register.pva.ring > 1) AND
              (stxe_p^.shadow_info.shadow_segment_kind = mmc$ssk_segment_number) THEN
          mmp$assign_page_frame (sva, aste_p, page_count, 0, assigned_page_count, pfti, pstatus);

          IF (assigned_page_count = page_count) THEN
            pstatus := ps_new_page_assigned;

            next_pfti := pfti;
            WHILE next_pfti <> 0 DO
              mmv$pt_p^ [mmv$pft_p^ [next_pfti].pti].v := TRUE;
              mmv$pt_p^ [mmv$pft_p^ [next_pfti].pti].m := TRUE;
              next_pfti := mmv$pft_p^ [next_pfti].link.bkw;
            WHILEND;

            mmp$update_eoi (fde_p, sva.offset + bytes_to_read - osv$page_size, mmc$uer_page_assigned);
            xcb_p^.shadow_reference_info.source_pva := #ADDRESS (1, stxe_p^.shadow_info.shadow_segment_number,
                  shadow_au_offset);
            xcb_p^.shadow_reference_info.destination_pva := #ADDRESS (1, fde_p^.last_segment_number,
                  sva.offset);
            xcb_p^.shadow_reference_info.page_count := page_count;
            tmp$set_monitor_flag (cst_p^.taskid, mmc$mf_shadow_file_reference, status);
          ELSE

{ Not all pages assigned.  Release those that were.

            WHILE pfti <> 0 DO
              mmp$delete_pt_entry (pfti, TRUE);
              next_pfti := mmv$pft_p^ [pfti].link.bkw;
              mmp$relink_page_frame (pfti, mmc$pq_free);
              pfti := next_pfti;
            WHILEND;

          IFEND;

{ If the job cannot trap or if the file is not shadowed by segment number, issue
{ the IO requests to read the pages.

        ELSE
          read_pages_from_disk_or_server (io_function, passive_fde_p, sva, page_count, aste_p,
                shadow_au_offset, io_id, TRUE, assigned_page_count, pstatus, pfti);
          page_count := assigned_page_count;

          IF (pstatus = ps_found_on_disk) OR (pstatus = ps_found_on_server) THEN
            next_pfti := pfti;
            WHILE next_pfti <> 0 DO
              mmv$pt_p^ [mmv$pft_p^ [next_pfti].pti].m := TRUE;
              next_pfti := mmv$pft_p^ [next_pfti].link.bkw;
            WHILEND;
            mmp$update_eoi (fde_p, sva.offset + bytes_to_read - osv$page_size, mmc$uer_page_assigned);
          IFEND;
        IFEND;
      IFEND;

    ELSE
      mtp$error_stop ('MM - unexpected DM error');
    CASEND;

  PROCEND mmp$page_pull;
?? OLDTITLE ??
?? TITLE := 'PROCESS_MEMORY_IMAGE_PF', EJECT ??

  PROCEDURE process_memory_image_pf
    (    sva: ost$system_virtual_address;
         aste_p: ^mmt$active_segment_table_entry;
     VAR pfti: mmt$page_frame_index;
     VAR pstatus: mmt$page_pull_status);

    VAR
      mpt_status: mmt$make_pt_entry_status,
      pfte_p: ^mmt$page_frame_table_entry;

    IF (((sva.offset - mmv$image_file.file_offset) + osv$180_memory_limits.lower) DIV osv$page_size) >
          UPPERVALUE (pfti) THEN
      pstatus := ps_beyond_file_limit;
      pfti := 0;
      RETURN; {<----}
    IFEND;
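{The PFTI is derived directly from the file offset: the memory resident portion of the old
{image file maps one-for-one onto the page frames starting at the lower 180 memory limit.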

    pfti := ((sva.offset - mmv$image_file.file_offset) + osv$180_memory_limits.lower) DIV osv$page_size;
    pfte_p := ^mmv$pft_p^ [pfti];
    pfte_p^.age := 0;
    pfte_p^.cyclic_age := 0;
    pfte_p^.sva := sva;
    pfte_p^.aste_p := aste_p;
    pfte_p^.locked_page := mmc$lp_aging_lock;
    pfte_p^.ijl_ordinal := jmv$system_ijl_ordinal;

    mmp$make_pt_entry (sva, pfti, aste_p, pfte_p, mpt_status);

    IF mpt_status = mmc$mpt_done THEN
      mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := TRUE;
      pstatus := ps_found_in_avail;
    ELSEIF mpt_status = mmc$mpt_page_table_full THEN
      pstatus := ps_pt_full;
      mmv$async_work.pt_full_aste_p := aste_p;
      mmv$async_work.pt_full_sva := sva;
      mmv$async_work.pt_full := TRUE;
      mmv$time_to_call_mem_mgr := 0;
      osv$time_to_check_asyn := 0;
    ELSE
      mtp$error_stop ('MM - error in processing memory image pf');
    IFEND;

  PROCEND process_memory_image_pf;

?? TITLE := 'PR_PF - Primary entry point for PF processing', EJECT ??

{Purpose:
{  This routine is called by monitor to process a page fault.

  PROCEDURE [XDCL] pr_pf
    (    dummy: ^cell;
         cst_p: ^ost$cpu_state_table);

    TYPE
      trick_ptr = RECORD
        CASE boolean OF
        = TRUE =
          pva: ost$pva,
        = FALSE =
          p: ^cell,
        CASEND,
      RECEND;

    VAR
      null_utp: [STATIC, READ] ost$pva := [1, 0fff(16), 7fffffff(16)];

    VAR
      aste_p: ^mmt$active_segment_table_entry,
      avail_mod_queue_overrun: boolean,
      check_aio_slowdown: boolean,
      count: 1 .. 32,
      cptime: ost$cp_time_value,
      faulted_tu: integer,
      fcount: integer,
      fde_p: gft$locked_file_desc_entry_p,
      file_limit: integer, {must be integer}
      found: boolean,
      i: 0 .. 0ffff(16),
      ipti: integer,
      ijle_p: ^jmt$initiated_job_list_entry,
      keypoint_page_fault_status: mmt$keypoint_page_fault_status,
      mcount: integer,
      monitor_fault: ost$monitor_fault,
      mws: integer,
      page_count: mmt$page_frame_index,
      paging_statistics_ijl_p: ^ost$paging_statistics,
      paging_statistics_xcb_p: ^ost$paging_statistics,
      pfti: mmt$page_frame_index,
      pstatus: mmt$page_pull_status,
      pva: trick_ptr,
      rcount: integer,
      relative_transfer_unit: integer,
      sac_p: ^mmt$segment_access_condition,
      ste_p: ^mmt$segment_descriptor,
      streaming_transfer_unit: integer,
      stxe_p: ^mmt$segment_descriptor_extended,
      sva: ost$system_virtual_address,
      xcb_p: ^ost$execution_control_block,
      xsva: ost$system_virtual_address,
      xpfti: mmt$page_frame_index;

?? NEWTITLE := '[inline] P$FIND_THE_PAGE', EJECT ??

    PROCEDURE [INLINE] p$find_the_page;

      VAR
        io_id: mmt$io_identifier,
        last_faulted_tu: integer,
        last_page_fault: ost$segment_offset,
        nominal_page_fault: boolean,
        page_streaming_available_page: boolean,
        pages_pulled: integer,
        pages_to_be_pulled: integer,
        pages_to_read: integer,
        pfti_of_faulted_page: mmt$page_frame_index,
        pstatus_of_faulted_page: mmt$page_pull_status,
        streaming_transfer_pages: integer,
        sva_current: ost$system_virtual_address,
        sva_start: ost$segment_offset,
        temp_offset: integer,
        transfer_unit_count: integer,
        tu_to_stream: integer;

      IF aste_p = NIL THEN
        faulted_tu := 0;
        page_count := 0;
        pstatus := ps_read_beyond_eoi;

      ELSEIF stxe_p^.access_state <> mmc$sas_allow_access THEN
        IF stxe_p^.access_state = mmc$sas_inhibit_access THEN
          mmv$refs_to_unrec_df_file_inhib := mmv$refs_to_unrec_df_file_inhib + 1;
          pstatus := ps_volume_unavailable;
        ELSEIF stxe_p^.access_state = mmc$sas_terminate_access THEN
          mmv$refs_to_unrec_df_file_term := mmv$refs_to_unrec_df_file_term + 1;
          pstatus := ps_server_terminated;
        IFEND;
        faulted_tu := 0;
        page_count := 0;

      ELSE
        io_id.specified := FALSE;
        streaming_transfer_pages := #SHIFT (1, stxe_p^.stream.transfer_size);
        streaming_transfer_unit := streaming_transfer_pages * osv$page_size;
        faulted_tu := sva.offset DIV streaming_transfer_unit;
        last_page_fault := stxe_p^.stream.last_page_fault;
        stxe_p^.stream.last_page_fault := sva.offset;
        last_faulted_tu := last_page_fault DIV streaming_transfer_unit;
        relative_transfer_unit := faulted_tu - last_faulted_tu;

        mmp$page_pull_hash_sva (sva, aste_p, page_count, pstatus, pfti);

{ If the page was found, the code below will exit p$find_the_page quickly if the segment is not in prestream
{ or stream mode.  If the page is not found or if the segment is in a stream mode, page_count, pstatus, and
{ pfti will be used later to determine if a call to mmp$page_pull is necessary.  Segments in a stream mode do
{ not attempt the quick exit because:   A) If an available page is found, the stream code will pull another
{ page.   B) If a locked/valid page is found, a segment in stream mode may need to read another transfer unit.


        nominal_page_fault := TRUE;

{ To detect sequential processing of a segment, the segment is logically divided into page streaming transfer
{ units.  If a page fault is either in the same transfer unit or in the next transfer unit, it is
{ considered to be sequential processing.  Prestream mode is entered when the number of consecutive page
{ faults that appear to be sequential exceeds mmv$page_streaming_prestream.  In prestream mode the
{ pages from the current page fault to the end of the transfer unit will be pulled.  Once we get to
{ page streaming mode, the process will keep IO requests outstanding to read the pages from the current
{ transfer unit plus read ahead one or more TUs (total number of TUs read = mmv$page_streaming_reads).
{ The code below checks the prestream threshold, counts sequential accesses, determines if the current page
{ fault is to be considered sequential, and either continues or terminates the sequential process.
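{ For example, assuming (illustrative value only) mmv$page_streaming_prestream = 4: faults whose
{ relative_transfer_unit is 0 or 1 raise sequential_accesses toward the threshold, while a fault behind the
{ last fault or more than one TU ahead resets the count to 1; once the count reaches 4 the prestream/stream
{ handling below takes over.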

        IF stxe_p^.stream.sequential_accesses < mmv$page_streaming_prestream THEN
          IF (relative_transfer_unit < 0) OR (relative_transfer_unit > 1) THEN
            stxe_p^.stream.sequential_accesses := 1; {Reset counter, count current fault as first fault
          ELSE
            stxe_p^.stream.sequential_accesses := stxe_p^.stream.sequential_accesses + 1;
          IFEND;
          IF page_count = 1 THEN
            RETURN; {----->
          IFEND;

        ELSE {prestream mode or stream mode}

{ The segment is in prestream or stream mode.  If it is very well behaved the relative transfer unit will be
{ equal to zero or one.  If zero, do not increment the sequential accesses count since a fault has already
{ occurred in this TU.  (The algorithm to switch to page streaming mode assumes each fault after prestream
{ mode is entered is equal to a transfer unit of data.)  If one, increment the sequential accesses count.
{ For the special cases listed below, continue but don't increment the sequential count; wait for
{ confirming sequential faults before going to page streaming mode.  The special cases:
{ a) Faulted_tu = 0; a fault in the first transfer unit.  Since the last fault was in a higher TU, a fault
{                    in TU=0 may indicate a rewind of the file.
{ b) Relative_Transfer_Unit greater than one but not more than mmv$page_streaming_reads+1; if in prestream
{                    mode the task has skipped at least one transfer unit.  In stream mode we are not sure if
{                    it is normal or if a transfer unit has been skipped.
{ c) Preset_Streaming; a special case to immediately enter page streaming mode.
{
{ Note that nominal_page_fault = TRUE at this point.

          IF relative_transfer_unit = 1 THEN
            nominal_page_fault := FALSE;
            IF stxe_p^.stream.random_faults > 0 THEN
              stxe_p^.stream.random_faults := 0;
              mmv$paging_statistics.page_streaming.random_faults :=
                    mmv$paging_statistics.page_streaming.random_faults + 1;
            IFEND;
            IF (stxe_p^.stream.sequential_accesses < UPPERVALUE (stxe_p^.stream.sequential_accesses)) THEN
              stxe_p^.stream.sequential_accesses := stxe_p^.stream.sequential_accesses + 1;
            IFEND;

          ELSEIF (((relative_transfer_unit >= 0) AND (relative_transfer_unit <=
                (mmv$page_streaming_reads + 1))) OR (faulted_tu = 0) OR (stxe_p^.stream.preset_streaming))
                THEN
            nominal_page_fault := FALSE;

{ This fault is considered random because it is either prior to the transfer unit of the last fault or it is
{ in a TU that is more than mmv$page_streaming_reads past the TU of the last fault.  If in prestream mode
{ terminate prestream.  If in page streaming mode, allow up to mmv$page_streaming_random_limit random faults.

          ELSEIF NOT stxe_p^.stream.streaming THEN

{terminate prestream mode,  count current fault as first fault

            mmv$paging_statistics.page_streaming.prestream_only :=
                  mmv$paging_statistics.page_streaming.prestream_only + 1;
            stxe_p^.stream.sequential_accesses := 1;

          ELSE {Page fault is in a transfer unit that is considered random, terminate if appropriate
            stxe_p^.stream.random_faults := stxe_p^.stream.random_faults + 1;
            IF stxe_p^.stream.random_faults < mmv$page_streaming_random_limit THEN

{ Doing nothing will suspend streaming for this fault

            ELSE { Terminate Streaming
              mmv$paging_statistics.page_streaming.terminated :=
                    mmv$paging_statistics.page_streaming.terminated + 1;
              stxe_p^.stream.sequential_accesses := 1;
              stxe_p^.stream.random_faults := 0;
              stxe_p^.stream.streaming := FALSE;
            IFEND;
          IFEND;
        IFEND; {prestream mode or stream mode}


{ If this page fault is to be processed via normal page fault processing, nominal_page_fault will be TRUE.
{ Otherwise this fault will be processed as a page streaming fault in which one or more transfer units will
{ be read.  If page streaming, the first call to page pull is for the actual page that faulted.  The status
{ from that call must be saved so that the processing at the end can be determined by the page that faulted.
{ All page streaming calls that are reading ahead must not cause the allocation of a new page (we will wait
{ to allocate until the task actually faults for the page), and if an error occurs it just terminates
{ this instance of read ahead without being processed as an error (again, if the task actually faults for the
{ page that got an error, the error will be processed at that time).
{ Note that if the earlier call to mmp$page_pull_hash_sva found a page, page_count = 1 and pstatus
{ and pfti refer to that page.

        IF nominal_page_fault THEN
          IF page_count = 0 THEN {call mmp$page_pull if the page was not found by mmp$page_pull_hash_sva
            IF mmc$sa_read_transfer_unit IN stxe_p^.software_attribute_set THEN
              pages_to_read := streaming_transfer_pages;
              IF stxe_p^.stream.sequential_accesses < mmv$page_streaming_prestream THEN
                stxe_p^.stream.sequential_accesses := stxe_p^.stream.sequential_accesses + 1;
              IFEND;
            ELSEIF (ste_p^.ste.xp = osc$non_executable) OR (ste_p^.ste.wp <> osc$non_writable) THEN
              pages_to_read := mmv$read_tu_read_write;
            ELSE
              pages_to_read := mmv$read_tu_execute;
            IFEND;

            IF mmv$trap_page_fault THEN
              IF (pva.pva.seg = 20(16)) AND (pva.pva.offset >= 20200000(16)) THEN
                mtp$step_unstep_system (syc$ic_software_breakpoint, 'MM-TRAP SEG 20 LARGE OFFSET');
              IFEND;
            IFEND;

            mmp$page_pull (sva, fde_p, cst_p, aste_p, stxe_p, io_id, pages_to_read, ioc$read_page, TRUE,
                  page_count, pstatus, pfti);

            IF pstatus = ps_done THEN
              mtp$error_stop ('MM - internal error-ps_done status from MMP$PAGE_PULL');
            IFEND;
          IFEND;

        ELSE {not a nominal page fault... read one or more transfer units
          transfer_unit_count := 1;
          IF stxe_p^.stream.preset_streaming THEN {segment has been preset to stream immediately
            stxe_p^.stream.preset_streaming := FALSE;
            IF NOT stxe_p^.stream.streaming THEN
              mmv$paging_statistics.page_streaming.initiated :=
                    mmv$paging_statistics.page_streaming.initiated + 1;
            IFEND;
            stxe_p^.stream.streaming := TRUE;
            transfer_unit_count := mmv$page_streaming_reads;

          ELSEIF relative_transfer_unit <= 0 THEN

{ Note, more than likely this fault is awaiting the disk completion of the second allocation unit within this
{ transfer unit.  The relative transfer unit would only be negative if this is a fault in TU=0 of the file
{ while the file is in page streaming mode.

            IF (sva.offset < last_page_fault) AND (relative_transfer_unit = 0) THEN
              mmv$paging_statistics.page_streaming.page_faults_tu :=
                    mmv$paging_statistics.page_streaming.page_faults_tu + 1;
            IFEND;
          ELSE
            IF stxe_p^.stream.streaming THEN
              IF (relative_transfer_unit <= mmv$page_streaming_reads) THEN
                transfer_unit_count := mmv$page_streaming_reads; {normal streaming, read ahead
              IFEND;
              IF (relative_transfer_unit = mmv$page_streaming_reads) THEN
                mmv$paging_statistics.page_streaming.task_slow :=
                      mmv$paging_statistics.page_streaming.task_slow + 1;
              IFEND;
            ELSE {not yet streaming
              tu_to_stream := (mmv$page_streaming_threshold DIV
                    streaming_transfer_unit) + mmv$page_streaming_prestream;
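{ For example, assuming (illustrative values only) mmv$page_streaming_threshold = 262144, a 32768-byte
{ transfer unit, and mmv$page_streaming_prestream = 4: tu_to_stream = (262144 DIV 32768) + 4 = 12, so full
{ streaming begins once sequential_accesses exceeds 12.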
              IF stxe_p^.stream.sequential_accesses > tu_to_stream THEN {Initiate streaming
                transfer_unit_count := mmv$page_streaming_reads;
                stxe_p^.stream.streaming := TRUE;
                mmv$paging_statistics.page_streaming.initiated :=
                      mmv$paging_statistics.page_streaming.initiated + 1;
              IFEND; {Initiate streaming
            IFEND; { streaming boolean
          IFEND;

{ Prepare to read pages from the current page fault to the end of one or more transfer units.  The counters
{ for the pages are set up so that we can skip to the end of a transfer unit in some cases (e.g., a locked
{ page).
{   pages_to_read -  number of pages to be read from the current transfer unit
{   pages_to_be_pulled - total number of pages in all of the TUs being pulled (includes pages before fault)
{   pages_pulled - number of pages already pulled, including pages in the faulted TU prior to the fault
{   sva_start - the sva offset of the beginning of the faulted transfer unit

          sva_start := faulted_tu * streaming_transfer_unit;
          sva_current := sva;
          pages_to_read := streaming_transfer_pages - ((sva.offset - sva_start) DIV osv$page_size);
          pages_pulled := streaming_transfer_pages - pages_to_read;
          pages_to_be_pulled := streaming_transfer_pages * transfer_unit_count;
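{ For example, assuming streaming_transfer_pages = 8 and a fault on the fourth page of its transfer unit
{ (three pages precede the fault): pages_to_read = 8 - 3 = 5, pages_pulled = 8 - 5 = 3, and with
{ transfer_unit_count = 2, pages_to_be_pulled = 8 * 2 = 16.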

{ The total number of pages that may actually be pulled equals the total number of pages in the TUs under
{ consideration minus the pages before the faulted page (pages_to_be_pulled - pages_pulled).  That total is
{ restricted to 25% of the working set.  WARNING: if at some time in the future the page pulls begin
{ at the start of the TU, the check will need to be more complicated to handle a large TU > 25% of MAXWS.

          IF (pages_to_be_pulled - pages_pulled) > (cst_p^.jcb_p^.max_working_set_size DIV 4) THEN
            pages_to_be_pulled := pages_pulled + (cst_p^.jcb_p^.max_working_set_size DIV 4);
            IF pages_to_read > (cst_p^.jcb_p^.max_working_set_size DIV 4) THEN
              pages_to_read := (cst_p^.jcb_p^.max_working_set_size DIV 4);
            IFEND;
          IFEND;
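{ Continuing the example above, assuming max_working_set_size = 40 (25% limit = 10): pages_to_be_pulled -
{ pages_pulled = 16 - 3 = 13 exceeds 10, so pages_to_be_pulled is reduced to 3 + 10 = 13; pages_to_read = 5
{ is left alone since 5 <= 10.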

{ Note that page_count and pstatus are still set from the initial call to mmp$page_pull_hash_sva which
{ was done early in pr_pf.  page_count = 1 if the page was found.  The quick exit from p$find_the_page
{ was not taken because the segment was in a stream mode.

          IF page_count = 0 THEN
            mmp$page_pull (sva, fde_p, cst_p, aste_p, stxe_p, io_id, pages_to_read, ioc$read_page, TRUE,
                  page_count, pstatus, pfti);
          IFEND;

          pfti_of_faulted_page := pfti;
          pstatus_of_faulted_page := pstatus;
          page_streaming_available_page := FALSE;

        /exit_and_continue_stream/
          BEGIN

          /pull_pages_page_streaming/
            WHILE TRUE DO
              pages_pulled := pages_pulled + page_count;
              pages_to_read := pages_to_read - page_count; {number of pages left in the current TU
              IF (pstatus <> ps_locked) AND (pstatus <> ps_valid_in_pt) THEN
                IF stxe_p^.stream.streaming THEN
                  mmv$paging_statistics.page_streaming.pages_streaming :=
                        mmv$paging_statistics.page_streaming.pages_streaming + page_count;
                ELSE
                  mmv$paging_statistics.page_streaming.pages_prestream :=
                        mmv$paging_statistics.page_streaming.pages_prestream + page_count;
                IFEND;
              IFEND;

              CASE pstatus OF
              = ps_locked, ps_valid_in_pt = {these pages are not counted, they were counted when initiated
                pages_pulled := pages_pulled + pages_to_read;
                pages_to_read := 0; {skip to the next transfer unit}

              = ps_found_on_disk =
                paging_statistics_ijl_p^.page_in_count := paging_statistics_ijl_p^.page_in_count + page_count;
                paging_statistics_xcb_p^.page_in_count := paging_statistics_xcb_p^.page_in_count + page_count;
                mmv$paging_statistics.ps_pages.disk := mmv$paging_statistics.ps_pages.disk + page_count;

              = ps_found_in_avail, ps_found_in_avail_modified =
                paging_statistics_ijl_p^.pages_reclaimed_from_queue :=
                      paging_statistics_ijl_p^.pages_reclaimed_from_queue + page_count;
                paging_statistics_xcb_p^.pages_reclaimed_from_queue :=
                      paging_statistics_xcb_p^.pages_reclaimed_from_queue + page_count;
                mmv$paging_statistics.ps_pages.reclaim := mmv$paging_statistics.ps_pages.reclaim + page_count;

{ Pages from the available queues probably indicate the file is being referenced again.  It is
{ likely that the next pages are also in one of the available queues, and therefore the potential
{ performance gain of reading pages ahead is reduced from saving disk accesses to saving only
{ page faults.  Allow up to two pages to come from the available queues and then exit.  Thus
{ if the pages are in the available queues, we will get two pages per page fault when streaming.

                IF page_streaming_available_page THEN
                  EXIT /exit_and_continue_stream/; {----->
                IFEND;
                page_streaming_available_page := TRUE; {indicate one page has been found in available queues

              = ps_new_page_assigned, ps_allocate_required_on_server =
                paging_statistics_ijl_p^.new_pages_assigned := paging_statistics_ijl_p^.new_pages_assigned +
                      page_count;
                paging_statistics_xcb_p^.new_pages_assigned := paging_statistics_xcb_p^.new_pages_assigned +
                      page_count;
                mmv$paging_statistics.ps_pages.new := mmv$paging_statistics.ps_pages.new + page_count;

{ Allocation of new pages occurs one at a time, terminate page streaming to avoid overhead

                EXIT /pull_pages_page_streaming/; { terminate page streaming

              = ps_found_on_server =
                paging_statistics_ijl_p^.pages_from_server := paging_statistics_ijl_p^.pages_from_server +
                      page_count;
                paging_statistics_xcb_p^.pages_from_server := paging_statistics_xcb_p^.pages_from_server +
                      page_count;
                mmv$paging_statistics.ps_pages.server := mmv$paging_statistics.ps_pages.server + page_count;

              = ps_done =
                mtp$error_stop ('MM - internal error-ps_done status from MMP$PAGE_PULL');
              ELSE

{ ps_read_beyond_eoi, ps_no_extend_permission, ps_beyond_file_limit
{ ps_server_terminated,  ps_io_temp_reject,  ps_pt_full, ps_volume_unavailable
{ ps_no_memory, ps_low_on_memory, ps_job_work_required, ps_runaway_write

                EXIT /pull_pages_page_streaming/; { terminate page streaming
              CASEND;

              IF pages_pulled >= pages_to_be_pulled THEN
                EXIT /exit_and_continue_stream/; {----->
              IFEND;
              IF pages_to_read <= 0 THEN
                pages_to_read := streaming_transfer_pages;
              IFEND;
              temp_offset := sva_start + pages_pulled * osv$page_size;
              IF (temp_offset > osc$maximum_offset) THEN
                EXIT /pull_pages_page_streaming/; { terminate page streaming
              IFEND;
              sva_current.offset := temp_offset;
              mmp$page_pull_hash_sva (sva_current, aste_p, page_count, pstatus, pfti);
              IF page_count = 0 THEN
                mmp$page_pull (sva_current, fde_p, cst_p, aste_p, stxe_p, io_id, pages_to_read, ioc$read_page,
                      FALSE, page_count, pstatus, pfti);
              IFEND;
            WHILEND /pull_pages_page_streaming/;

{ Terminate page streaming if we exit here; the usual exit skips this code

            stxe_p^.stream.sequential_accesses := 0;
            stxe_p^.stream.random_faults := 0;
            stxe_p^.stream.streaming := FALSE;

          END /exit_and_continue_stream/;

          page_count := 0;
          pstatus := pstatus_of_faulted_page;
          pfti := pfti_of_faulted_page;

        IFEND; {nominal_page_fault}
      IFEND;

    PROCEND p$find_the_page;
?? OLDTITLE ??
?? EJECT ??

    xcb_p := cst_p^.xcb_p;

{Use a special routine for page faults which occur during deadstart.
    IF NOT mmv$tables_initialized THEN
      pf_proc_tables_not_initialized (xcb_p);
      RETURN; {----->
    IFEND;

{ Process page fault on keypoint segment separate from other page faults

    IF (xcb_p^.xp.untranslatable_pointer.seg = osc$kpt_pva_segment) THEN
      osp$process_keypoint_page_fault (xcb_p^.xp.untranslatable_pointer.offset, keypoint_page_fault_status);
      CASE keypoint_page_fault_status OF

      = mmc$kpfs_normal =
        RETURN; {----->

      = mmc$kpfs_disable_keypoints =
        xcb_p^.xp.p_register.pva.offset := xcb_p^.xp.p_register.pva.offset + 4;
        RETURN; {----->

      = mmc$kpfs_invalid_keypoint =

{ Do nothing; subsequent page fault processing will reject with an access violation.

      ELSE
      CASEND;
    IFEND;

{Get the PVA that caused the page fault from the exchange package of the current user task and convert it to
{an SVA.

    pva.pva := xcb_p^.xp.untranslatable_pointer;

    mmp$convert_pva (pva.p, cst_p, sva, fde_p, aste_p, ste_p, stxe_p);

    ijle_p := cst_p^.ijle_p;
    paging_statistics_ijl_p := ^ijle_p^.statistics.paging_statistics;
    paging_statistics_xcb_p := ^xcb_p^.paging_statistics;

    p$find_the_page;

{ We have gotten here in one of four ways in which pstatus may have been set:
{    1. aste_p = NIL  (not likely to happen)
{    2. The page was found by the proc mmp$page_pull_hash_sva and then the quick exit was taken.  At this
{       point this case is exactly the same as a nominal page fault (case 3).
{    3. Nominal page fault (PSTATUS and PAGE_COUNT reflect the call to pull the faulted page)
{    4. Page streaming mode pulled the faulted page and saved PSTATUS.  Additional page pulls may have been
{       performed after the faulted page was pulled.  PSTATUS has been restored to the value returned with
{       the faulted page but PAGE_COUNT = 0 because the page counters were updated by the page streaming code.

    paging_statistics_xcb_p^.page_fault_count := paging_statistics_xcb_p^.page_fault_count + 1;
    paging_statistics_ijl_p^.page_fault_count := paging_statistics_ijl_p^.page_fault_count + 1;

    CASE pstatus OF
    = ps_found_in_avail, ps_found_in_avail_modified =
      paging_statistics_ijl_p^.pages_reclaimed_from_queue :=
            paging_statistics_ijl_p^.pages_reclaimed_from_queue + page_count;
      paging_statistics_xcb_p^.pages_reclaimed_from_queue :=
            paging_statistics_xcb_p^.pages_reclaimed_from_queue + page_count;
      mmv$paging_statistics.pf_pages.reclaim := mmv$paging_statistics.pf_pages.reclaim + page_count;

    = ps_new_page_assigned =
      paging_statistics_ijl_p^.new_pages_assigned := paging_statistics_ijl_p^.new_pages_assigned + page_count;
      paging_statistics_xcb_p^.new_pages_assigned := paging_statistics_xcb_p^.new_pages_assigned + page_count;
      mmv$paging_statistics.pf_pages.new := mmv$paging_statistics.pf_pages.new + page_count;
      stxe_p^.stream.sequential_accesses := 0; { zero count to prevent prestream mode on next PF


{ Since new pages are being allocated, force the transfer unit size to be at least an allocation unit
{ so that if free behind is performed it will go back at least an allocation size to free pages.
{ relative_transfer_unit is not recalculated, it will just cause free behind to be attempted early.

      IF fde_p^.allocation_unit_size > streaming_transfer_unit THEN
        streaming_transfer_unit := fde_p^.allocation_unit_size;
        faulted_tu := sva.offset DIV streaming_transfer_unit;
      IFEND;
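{ For example, assuming (illustrative values only) allocation_unit_size = 65536 bytes and a 32768-byte
{ streaming transfer unit: streaming_transfer_unit is raised to 65536 and faulted_tu is recomputed in the
{ larger units, so free behind will reach back at least one full allocation unit.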

    = ps_valid_in_pt, ps_job_work_required =

    = ps_read_beyond_eoi, ps_no_extend_permission =
      monitor_fault.identifier := mmc$segment_fault_processor_id;
      sac_p := #LOC (monitor_fault.contents);
      IF pstatus = ps_read_beyond_eoi THEN
        sac_p^.identifier := mmc$sac_read_beyond_eoi;
      ELSE
        sac_p^.identifier := mmc$sac_no_append_permission;
      IFEND;
      sac_p^.segment := pva.p;
      tmp$send_monitor_fault (cst_p^.taskid, #LOC (monitor_fault), TRUE);

    = ps_beyond_file_limit =
      monitor_fault.identifier := mmc$segment_fault_processor_id;
      sac_p := #LOC (monitor_fault.contents);
      sac_p^.identifier := mmc$sac_read_write_beyond_msl;
      sac_p^.segment := pva.p;
      IF fde_p^.stack_for_ring > 0 THEN
        file_limit := fde_p^.file_limit + 500000;
        IF file_limit > 07fffffff(16) THEN
          file_limit := 7fffffff(16);
        IFEND;
        fde_p^.file_limit := file_limit;
      IFEND;
      tmp$send_monitor_fault (cst_p^.taskid, #LOC (monitor_fault), FALSE);

    = ps_server_terminated =
      monitor_fault.identifier := mmc$segment_fault_processor_id;
      sac_p := #LOC (monitor_fault.contents);
      sac_p^.identifier := mmc$sac_file_server_terminated;
      sac_p^.segment := pva.p;
      tmp$send_monitor_fault (cst_p^.taskid, #LOC (monitor_fault), FALSE);

    = ps_found_on_disk =
      paging_statistics_ijl_p^.page_in_count := paging_statistics_ijl_p^.page_in_count + page_count;
      paging_statistics_xcb_p^.page_in_count := paging_statistics_xcb_p^.page_in_count + page_count;
      mmv$paging_statistics.pf_pages.disk := mmv$paging_statistics.pf_pages.disk + page_count;

{NOTE
{Job recovery uses the active io count to determine the status of a page after a crash ...
{DON'T MESS WITH ACTIVE IO COUNT UNLESS YOU UNDERSTAND ITS USE IN JOB RECOVERY.  Thank you.

      IF mmv$pft_p^ [pfti].active_io_count = 0 THEN
        mtp$error_stop ('MM - page fault queue no IO');
      IFEND;
      tmp$queue_task (cst_p^.taskid, tmc$ts_page_wait, mmv$pft_p^ [pfti].task_queue);
      xcb_p^.page_wait_info.pva := pva.p;

    = ps_locked =

{NOTE
{Job recovery uses the active io count to determine the status of a page after a crash ...
{DON'T MESS WITH ACTIVE IO COUNT UNLESS YOU UNDERSTAND ITS USE IN JOB RECOVERY.  Thank you.

      IF mmv$pft_p^ [pfti].active_io_count = 0 THEN
        mtp$error_stop ('MM - page fault queue no IO');
      IFEND;
      tmp$queue_task (cst_p^.taskid, tmc$ts_page_wait, mmv$pft_p^ [pfti].task_queue);
      xcb_p^.page_wait_info.pva := pva.p;

    = ps_io_temp_reject =
      tmp$cause_task_switch;

    = ps_pt_full =
      cst_p^.dispatch_control.asynchronous_interrupts_pending := TRUE;
      tmp$cause_task_switch;

    = ps_no_memory, ps_low_on_memory =
      tmp$queue_task (cst_p^.taskid, tmc$ts_memory_wait, mmv$memory_wait_queue);

{  Process the case of the page found on the server mainframe.

    = ps_found_on_server =
      paging_statistics_ijl_p^.pages_from_server := paging_statistics_ijl_p^.pages_from_server + page_count;
      paging_statistics_xcb_p^.pages_from_server := paging_statistics_xcb_p^.pages_from_server + page_count;
      mmv$paging_statistics.pf_pages.server := mmv$paging_statistics.pf_pages.server + page_count;

{ Check active_io_count to ensure activity exists.  No activity is fatal.
      IF mmv$pft_p^ [pfti].active_io_count = 0 THEN
        mtp$error_stop ('MM - page fault queue no IO, ps_found_on_server');
      IFEND;

{ Queue the task in page wait.
      tmp$queue_task (cst_p^.taskid, tmc$ts_page_wait, mmv$pft_p^ [pfti].task_queue);
      xcb_p^.page_wait_info.pva := pva.p;

    = ps_allocate_required_on_server =
      paging_statistics_ijl_p^.new_pages_assigned := paging_statistics_ijl_p^.new_pages_assigned + page_count;
      paging_statistics_xcb_p^.new_pages_assigned := paging_statistics_xcb_p^.new_pages_assigned + page_count;
      mmv$paging_statistics.pf_pages.new := mmv$paging_statistics.pf_pages.new + page_count;
      stxe_p^.stream.sequential_accesses := 0; { zero count to prevent prestream mode on next PF


{ Since new pages are being allocated, force the transfer unit size to be at least an allocation unit
{ so that if free behind is performed it will go back at least an allocation size to free pages.
{ relative_transfer_unit is not recalculated, it will just cause free behind to be attempted early.

      IF fde_p^.allocation_unit_size > streaming_transfer_unit THEN
        streaming_transfer_unit := fde_p^.allocation_unit_size;
        faulted_tu := sva.offset DIV streaming_transfer_unit;
      IFEND;

{ Check active_io_count to ensure activity exists.  No activity is fatal.

      IF mmv$pft_p^ [pfti].active_io_count = 0 THEN
        mtp$error_stop ('MM - page fault queue no IO, ps_found_on_server');
      IFEND;

{ Queue the task in page wait.

      tmp$queue_task (cst_p^.taskid, tmc$ts_page_wait, mmv$pft_p^ [pfti].task_queue);
      xcb_p^.page_wait_info.pva := pva.p;

    = ps_volume_unavailable =
      xcb_p^.page_wait_info.pva := pva.p;
      mmp$process_volume_unavailable (xcb_p, FALSE);
    = ps_done =
      mtp$error_stop ('MM - internal error-ps_done status from MMP$PAGE_PULL');

    = ps_runaway_write =
      monitor_fault.identifier := mmc$segment_fault_processor_id;
      sac_p := #LOC (monitor_fault.contents);
      sac_p^.identifier := mmc$sac_runaway_write;
      sac_p^.segment := pva.p;
      tmp$send_monitor_fault (cst_p^.taskid, #LOC (monitor_fault), TRUE);

    CASEND;



{ If appropriate, do free behind.  Free pages that are in the transfer units prior to the transfer unit
{ immediately before the current page fault transfer unit (i.e., faulted_tu - 2, -3, -4, etc.).
{ Stop freeing pages at the first page that is not freed because it is locked or it is not found.
{ Note that if a new page was assigned, a check was made to force transfer size >= allocation size.
{ Since this code looks at the pages in reverse order, the PFTI procs mmp$reset_store_pfti_reverse
{ and mmp$store_pfti_reverse are used so that mmp$remove_pages_from_jws will free the pages in
{ ascending sva order ... this is helpful if the pages are modified.
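{ For example, assuming 8-page transfer units and faulted_tu = 5: xsva is set to the start of TU 4 (page 32);
{ the loop decrements before hashing, so the first candidate is page 31, the last page of TU 3
{ (faulted_tu - 2), and the scan proceeds toward page 0 until a locked or missing page stops it.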

    IF (mmc$sa_free_behind IN stxe_p^.software_attribute_set) AND
          (fde_p^.media <> gfc$fm_transient_segment) THEN
      xsva := sva;
      IF (relative_transfer_unit > 0) AND (faulted_tu > 1) THEN
        xsva.offset := ((faulted_tu - 1) * streaming_transfer_unit);
        mmp$reset_store_pfti_reverse;

      /free_behind/
        WHILE xsva.offset > 0 DO {since xsva is on a page boundary, if > 0 it is at least one page.
          xsva.offset := xsva.offset - osv$page_size;
          #HASH_SVA (xsva, ipti, count, found);
          IF NOT found THEN
            EXIT /free_behind/; { Exit if the page was not found
          ELSE
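{ The rma field is in units of 512 bytes; rma * 512 DIV osv$page_size converts it to a page frame table
{ index.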
            xpfti := (mmv$pt_p^ [ipti].rma * 512) DIV osv$page_size;
            IF (mmv$pft_p^ [xpfti].queue_id >= mmc$pq_first_valid_in_pt) AND
                  (mmv$pft_p^ [xpfti].locked_page = mmc$lp_not_locked) THEN
              mmp$store_pfti_reverse (xpfti);
            ELSE
              EXIT /free_behind/; {Exit if the page is locked
            IFEND;
          IFEND;
        WHILEND /free_behind/;

        mmp$fetch_pfti_array_size (rcount);
        IF rcount > 0 THEN
          mmp$remove_pages_from_jws (mmc$pq_avail_modified, ijle_p, NOT mmv$wait_on_avail_mod_q_full, fcount,
                mcount, rcount);
          mmv$paging_statistics.page_streaming.pages_freed_behind :=
                mmv$paging_statistics.page_streaming.pages_freed_behind + rcount;
          IF (fcount > 0)
{       } AND (cst_p^.ijl_ordinal <> jmv$system_ijl_ordinal)
{       } AND (NOT xcb_p^.critical_task)
{       } AND (tmv$ptl_p^ [cst_p^.taskid.index].new_task_status = tmc$ts_null) THEN
            tmp$queue_task (cst_p^.taskid, tmc$ts_avail_mod_q_full_wait, mmv$avail_mod_wait_queue);
          IFEND;
        IFEND;
      IFEND; {faulted tu GT 1
    IFEND; {free behind



{Update page fault statistics.

    mmv$pf_statistics [$INTEGER (pstatus)] := mmv$pf_statistics [$INTEGER (pstatus)] + 1;
    i := mmv$pf_sva_array.next_i + 1;
    i := i MOD num_pf_recs;
    mmv$pf_sva_array.next_i := i;
    mmv$pf_sva_array.pf_recs [i].sva := sva;
    mmv$pf_sva_array.pf_recs [i].pstatus_time := (#FREE_RUNNING_CLOCK (0) DIV 131072) MOD
          100(16) + $INTEGER (pstatus) * 100(16);
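{ pf_recs is a circular buffer of num_pf_recs entries.  pstatus_time packs two fields into one cell: the low
{ byte (MOD 100(16)) is a coarse timestamp derived from the free-running clock, and the upper bits hold
{ $INTEGER (pstatus) * 100(16), so both the status and the approximate time of each fault can be recovered.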


{Scan the JWS if the job cp time exceeds the aging threshold.

    IF mmv$aging_algorithm >= 4 THEN
      cptime := ijle_p^.statistics.cp_time.time_spent_in_job_mode;
    ELSE
      cptime := ijle_p^.statistics.cp_time.time_spent_in_mtr_mode +
            ijle_p^.statistics.cp_time.time_spent_in_job_mode;
    IFEND;

    IF cptime > cst_p^.jcb_p^.cptime_next_age_working_set THEN
      mmp$age_job_working_set (ijle_p, cst_p^.jcb_p, NOT mmv$wait_on_avail_mod_q_full,
            avail_mod_queue_overrun);
      IF avail_mod_queue_overrun
{   } AND (cst_p^.ijl_ordinal <> jmv$system_ijl_ordinal)
{   } AND (NOT xcb_p^.critical_task)
{   } AND (tmv$ptl_p^ [cst_p^.taskid.index].new_task_status = tmc$ts_null) THEN
        tmp$queue_task (cst_p^.taskid, tmc$ts_avail_mod_q_full_wait, mmv$avail_mod_wait_queue);
      IFEND;
    IFEND;
    mws := cst_p^.jcb_p^.max_working_set_size;
    mws := mws DIV 16;
    IF mws < 16 THEN
      mws := 16;
    IFEND;


    check_aio_slowdown := (ijle_p^.job_page_queue_list [mmc$pq_job_working_set].count >
          (cst_p^.jcb_p^.max_working_set_size + mws));
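{ For example, assuming max_working_set_size = 200: mws = 200 DIV 16 = 12, which is raised to the floor of
{ 16, so check_aio_slowdown becomes TRUE once the job working set exceeds 200 + 16 = 216 pages.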

    IF check_aio_slowdown OR (ijle_p^.job_page_queue_list [mmc$pq_job_working_set].count >
          mmv$max_working_set_size) THEN
      mmp$trim_job_working_set (ijle_p, cst_p^.jcb_p, FALSE {trim_to_swap_size=false} ,
            avail_mod_queue_overrun);
      IF tmv$ptl_p^ [cst_p^.taskid.index].new_task_status = tmc$ts_null THEN
        IF avail_mod_queue_overrun AND (cst_p^.ijl_ordinal <> jmv$system_ijl_ordinal)
{     } AND (NOT xcb_p^.critical_task) THEN
          tmp$queue_task (cst_p^.taskid, tmc$ts_avail_mod_q_full_wait, mmv$avail_mod_wait_queue);
        ELSEIF check_aio_slowdown THEN
          CASE pstatus OF
          = ps_found_in_avail, ps_found_in_avail_modified, ps_new_page_assigned, ps_valid_in_pt =
            IF ijle_p^.active_io_requests > mmv$maxws_aio_threshold THEN
              mmv$maxws_aio_count := mmv$maxws_aio_count + 1;
              xcb_p^.maxws_aio_slowdown := xcb_p^.maxws_aio_slowdown + 1;
              ijle_p^.maxws_aio_slowdown_display := ((mmv$maxws_aio_slowdown DIV
                    mmv$jws_queue_age_interval) + 1) MOD 256;
              tmp$cause_task_switch;
            IFEND;
          ELSE
          CASEND;
        IFEND;
      IFEND;
    ELSEIF (ijle_p^.active_io_requests > jmv$service_classes [ijle_p^.job_scheduler_data.service_class]^.
          attributes.aio_limit)
{    } AND (tmv$ptl_p^ [cst_p^.taskid.index].new_task_status = tmc$ts_null) THEN
      CASE pstatus OF
      = ps_found_in_avail, ps_found_in_avail_modified, ps_new_page_assigned, ps_valid_in_pt =
        tmp$cause_task_switch;
        mmv$aio_limit_count := mmv$aio_limit_count + 1;
      ELSE
      CASEND;
    IFEND;

    IF paging_statistics_ijl_p^.incremental_max_ws < ijle_p^.job_page_queue_list [mmc$pq_job_working_set].
          count THEN
      paging_statistics_ijl_p^.incremental_max_ws := ijle_p^.job_page_queue_list [mmc$pq_job_working_set].
            count;
      IF paging_statistics_ijl_p^.working_set_max_used < ijle_p^.job_page_queue_list [mmc$pq_job_working_set].
            count THEN
        paging_statistics_ijl_p^.working_set_max_used := ijle_p^.job_page_queue_list [mmc$pq_job_working_set].
              count;
      IFEND;
    IFEND;
    IF paging_statistics_xcb_p^.working_set_max_used < ijle_p^.job_page_queue_list [mmc$pq_job_working_set].
          count THEN
      paging_statistics_xcb_p^.working_set_max_used := ijle_p^.job_page_queue_list [mmc$pq_job_working_set].
            count;
    IFEND;

{Free queue must be replenished if the number of free+avail pages is below the threshold.
    check_free_queues (cst_p);

{Reset UTP in the Exchange Package.
    xcb_p^.xp.untranslatable_pointer := null_utp;

  PROCEND pr_pf;

?? TITLE := 'MMP$PROCESS_ASSIGN_PAGES_REQ', EJECT ??

  PROCEDURE [XDCL] mmp$process_assign_pages_req
    (VAR rb: mmt$rb_assign_pages;
         cst_p: ^ost$cpu_state_table);

    CASE rb.sub_reqcode OF
    = mmc$aprc_assign =
      mmp$process_assign_pages (rb, cst_p);

    = mmc$aprc_cancel_reserve =
      mmp$process_cancel_reserve (rb, cst_p);

    ELSE
      mtp$error_stop ('MM--ASSIGN PAGES--UNKNOWN REQUEST');
    CASEND;

  PROCEND mmp$process_assign_pages_req;

?? TITLE := 'MMP$PROCESS_ASSIGN_PAGES', EJECT ??

  PROCEDURE mmp$process_assign_pages
    (VAR rb: mmt$rb_assign_pages;
         cst_p: ^ost$cpu_state_table);

    CONST
      mmc$ap_ignore_maxws_and_trim = 131072;

    VAR
      assigned_pages_count: mmt$page_frame_index,
      aste_p: ^mmt$active_segment_table_entry,
      count: 1 .. 32,
      fde_p: gft$locked_file_desc_entry_p,
      first_pfti: mmt$page_frame_index,
      found: boolean,
      i: integer,
      ijle_p: ^jmt$initiated_job_list_entry,
      ipti: integer,
      memory_available: boolean,
      next_pfti: mmt$page_frame_index,
      page_status: gft$page_status,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      pstatus: mmt$page_pull_status,
      psva: ost$system_virtual_address,
      pte_p: ^ost$page_table_entry,
      reject_offset: ost$segment_offset,
      requested_page_count: mmt$page_frame_index,
      remaining_pages_to_assign: mmt$page_frame_index,
      ste_p: ^mmt$segment_descriptor,
      stxe_p: ^mmt$segment_descriptor_extended,
      sva: ost$system_virtual_address,
      trim_pages: boolean,
      xsva: ost$system_virtual_address,
      valid_pages_in_memory: mmt$page_frame_index,
      pages_not_in_memory: mmt$page_frame_index,
      am_pages_in_memory: mmt$page_frame_index,
      avail_pages_in_memory: mmt$page_frame_index;

    rb.status.normal := TRUE;

    IF NOT mmv$tables_initialized THEN
      RETURN; {----->
    IFEND;

    mmp$verify_pva (^rb.pva, mmc$sat_write, rb.status);
    IF NOT rb.status.normal THEN
      RETURN; {----->
    IFEND;

    mmp$convert_pva (rb.pva, cst_p, sva, fde_p, aste_p, ste_p, stxe_p);
    IF stxe_p^.access_state <> mmc$sas_allow_access THEN
      IF stxe_p^.access_state = mmc$sas_inhibit_access THEN
        mtp$set_status_abnormal ('MM', mme$volume_unavailable, rb.status);
        RETURN; {----->
      ELSEIF stxe_p^.access_state = mmc$sas_terminate_access THEN
        mtp$set_status_abnormal ('DF', dfe$server_has_terminated, rb.status);
        RETURN; {----->
      IFEND;
    IFEND;

    IF aste_p = NIL THEN
      mtp$set_status_abnormal ('MM', mme$invalid_pva, rb.status);
      RETURN; {----->
    IFEND;

    requested_page_count := (#OFFSET (rb.pva) + rb.length - 1) DIV osv$page_size -
          (#OFFSET (rb.pva) DIV osv$page_size) + 1;
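{ For example, assuming (illustrative values only) osv$page_size = 4096, #OFFSET (rb.pva) = 4000, and
{ rb.length = 200: the range spans bytes 4000 .. 4199, so requested_page_count =
{ (4199 DIV 4096) - (4000 DIV 4096) + 1 = 1 - 0 + 1 = 2 pages.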

    IF (sva.offset + requested_page_count * osv$page_size) > fde_p^.file_limit THEN
      mtp$set_status_abnormal ('MM', mme$assign_length_too_long, rb.status);
      RETURN; {----->
    IFEND;

    IF (aste_p^.queue_id = mmc$pq_wired) OR (aste_p^.queue_id = mmc$pq_job_fixed) THEN
      mtp$set_status_abnormal ('MM', mme$wired_or_fixed_segs_illegal, rb.status);
      RETURN; {----->
    IFEND;

{ Round the sva down to a page boundary.

    sva.offset := (sva.offset DIV osv$page_size) * osv$page_size;
    xsva := sva;

    CASE fde_p^.media OF
    = gfc$fm_transient_segment =
      IF (aste_p^.pages_in_memory + requested_page_count > mmv$max_pages_no_file) THEN
        page_status := gfc$ps_job_mode_work_required;
        reject_offset := sva.offset + (requested_page_count * osv$page_size);
      ELSE
        page_status := gfc$ps_page_doesnt_exist;
      IFEND;
    = gfc$fm_mass_storage_file =
      mmv$last_segment_accessed := #SEGMENT (rb.pva);
      dmp$fetch_multi_page_status (fde_p, sva.offset, requested_page_count * osv$page_size,
            stxe_p^.file_limits_enforced, reject_offset, page_status);
    = gfc$fm_served_file =
      dfp$fetch_multi_page_status (fde_p, sva.offset, requested_page_count * osv$page_size, page_status);
      reject_offset := sva.offset + (requested_page_count - 1) * osv$page_size;
    CASEND;


    CASE page_status OF
    = gfc$ps_page_on_disk, gfc$ps_page_on_server, gfc$ps_page_doesnt_exist =

{ These are ok; do nothing

    = gfc$ps_temp_reject, gfc$ps_account_limit_exceeded =
      mtp$set_status_abnormal ('MM', mme$temporary_reject, rb.status);
      tmp$cause_task_switch;
      RETURN; {----->

    = gfc$ps_volume_unavailable =
      mtp$set_status_abnormal ('MM', mme$volume_unavailable, rb.status);
      RETURN; {----->

    = gfc$ps_server_terminated =
      mtp$set_status_abnormal ('DF', dfe$server_has_terminated, rb.status);
      RETURN; {----->

    = gfc$ps_server_allocate_required, gfc$ps_job_mode_work_required =
      set_assign_active (stxe_p, reject_offset);
      mmp$update_eoi (fde_p, reject_offset, mmc$uer_page_assigned);
      tmp$set_monitor_flag (cst_p^.taskid, mmc$mf_segment_mgr_flag, rb.status);
      mtp$set_status_abnormal ('MM', mme$dm_assign_active, rb.status);
      RETURN; {----->

    ELSE
      mtp$error_stop ('mm - unexpected DM error in assign pages');
    CASEND;


{ Calculate how many pages are needed; some may already be in the jws.

    valid_pages_in_memory := 0;
    pages_not_in_memory := 0;
    am_pages_in_memory := 0;
    avail_pages_in_memory := 0;

    FOR i := 1 TO requested_page_count DO
      #HASH_SVA (xsva, ipti, count, found);
      IF found THEN
        pte_p := ^mmv$pt_p^ [ipti];
        IF pte_p^.v THEN
          valid_pages_in_memory := valid_pages_in_memory + 1;
        ELSE
          pfti := pte_p^.rma * 512 DIV osv$page_size;
          pfte_p := ^mmv$pft_p^ [pfti];
          IF pfte_p^.queue_id = mmc$pq_avail THEN
            avail_pages_in_memory := avail_pages_in_memory + 1;
          ELSE
            am_pages_in_memory := am_pages_in_memory + 1;
          IFEND;
        IFEND;
      ELSE
        pages_not_in_memory := pages_not_in_memory + 1;
      IFEND;
      xsva.offset := xsva.offset + osv$page_size;
    FOREND;

    ijle_p := cst_p^.ijle_p;

{ Determine whether the limit on the job's working set will allow the addition of the new pages
{ to be assigned (if the pages go in the working set), and whether there is memory available to assign.


    IF aste_p^.queue_id = mmc$pq_job_working_set THEN
      trim_pages := FALSE;
      IF rb.length <= mmc$ap_ignore_maxws_and_trim THEN
        trim_pages := TRUE;
      ELSEIF (((pages_not_in_memory + avail_pages_in_memory + am_pages_in_memory) +
            ijle_p^.job_page_queue_list [mmc$pq_job_working_set].count) >
            cst_p^.jcb_p^.max_working_set_size) OR ((pages_not_in_memory + ijle_p^.
            job_page_queue_list [mmc$pq_job_working_set].count) > mmv$max_working_set_size) THEN
        mtp$set_status_abnormal ('MM', mme$assign_length_too_long, rb.status);
        RETURN; {----->
      IFEND;
    IFEND;
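{ For example (illustrative counts), for a request larger than mmc$ap_ignore_maxws_and_trim with
{ pages_not_in_memory = 2, avail_pages_in_memory = 1, am_pages_in_memory = 2, a current JWS count of 90, and
{ max_working_set_size = 100: (2 + 1 + 2) + 90 = 95 <= 100, so the request proceeds (subject also to the
{ mmv$max_working_set_size check).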

{ Pages that are reassignable NOW (free + available) will be decremented by new pages assigned
{ (pages_not_in_memory) and available pages reassigned (avail_pages_in_memory).  Make sure
{ assigning that many pages for this job will not drive memory too low.

    memory_available := ((mmv$gpql [mmc$pq_free].pqle.count + mmv$gpql [mmc$pq_avail].pqle.count +
          ijle_p^.memory_reserve_request.reserved_page_count - mmv$reserved_page_count - pages_not_in_memory -
          avail_pages_in_memory) > mmv$aggressive_aging_level_2);
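{ For example (illustrative counts): free = 100, avail = 50, this job's reserved pages = 0, system-wide
{ reserved pages = 20, pages_not_in_memory = 30, and avail_pages_in_memory = 10 give
{ 100 + 50 + 0 - 20 - 30 - 10 = 90; memory is considered available only if 90 > mmv$aggressive_aging_level_2.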

    IF memory_available THEN

{ Any pages in the requested range that are already in memory need to be marked as valid
{ and used; pages that are not in memory need to be assigned.  If specified, preset pages.

      xsva := sva;
      WHILE requested_page_count > 0 DO
        #HASH_SVA (xsva, ipti, count, found);
        IF found THEN
          pte_p := ^mmv$pt_p^ [ipti];
          IF NOT pte_p^.v THEN
            pfti := (pte_p^.rma * 512) DIV osv$page_size;
            pfte_p := ^mmv$pft_p^ [pfti];

            IF (pfte_p^.locked_page = mmc$lp_page_in_lock) OR
                  (pfte_p^.locked_page = mmc$lp_write_protected_lock) OR
                  (pfte_p^.locked_page = mmc$lp_server_allocate_lock) THEN
              IF (pfte_p^.active_io_count = 0) THEN
                mtp$error_stop ('MM - assign pages - queue no IO');
              IFEND;
              tmp$queue_task (cst_p^.taskid, tmc$ts_page_wait, pfte_p^.task_queue);
              tmp$reissue_monitor_request;
              RETURN; {----->
            IFEND;

            IF aste_p^.queue_id >= mmc$pq_job_base THEN
              pfte_p^.ijl_ordinal := cst_p^.ijl_ordinal;
            ELSE
              pfte_p^.ijl_ordinal := jmv$system_ijl_ordinal;
            IFEND;
            mmp$relink_page_frame (pfti, aste_p^.queue_id);
          ELSEIF rb.preset_pages THEN

{ Valid pages cannot be preset; clear the valid bit, preset the page, and reset the valid bit.

            pte_p^.v := FALSE;
          IFEND;
          IF rb.preset_pages THEN
            mmp$preset_real_memory (xsva, fde_p^.preset_value);
            pte_p^.m := TRUE;
          IFEND;
          pte_p^.u := TRUE;
          pte_p^.v := TRUE;
          requested_page_count := requested_page_count - 1;
          xsva.offset := xsva.offset + osv$page_size;
        ELSE

{ The page is not already in memory; assign a new page to memory.  MMP$ASSIGN_PAGE_FRAME
{ will start at the sva passed to it and assign pages until the requested count is reached or
{ until one of the pages in the requested range is found already in memory.  The procedure
{ returns the number of pages that were assigned.  "Request" the lesser of the remaining pages to
{ assign and reassignable.now from mmp$assign_page_frame.  This is done so that assign_page_frame will not
{ reject for no_memory.  Checks were made above to be sure that there is enough memory for the request.
{ The number of new pages that needs to be assigned can change if make_pt_entry is freeing some of the
{ pages in the total requested range to prevent a page table full condition, so pages_not_in_memory
{ cannot be used as the number to assign.  The total requested_page_count can include pages that are
{ already in the working set (they do not need to be assigned), so requested_page_count cannot be
{ passed to mmp$assign_page_frame.  (If there are pages already in the job working set,
{ requested_page_count is too large and may cause a no_memory reject.)

          IF requested_page_count > mmv$reassignable_page_frames.now THEN
            remaining_pages_to_assign := mmv$reassignable_page_frames.now;
          ELSE
            remaining_pages_to_assign := requested_page_count;
          IFEND;
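{ For example: if requested_page_count = 20 but mmv$reassignable_page_frames.now = 12, only 12 pages are
{ requested from mmp$assign_page_frame on this pass, so it will not reject for no_memory; the remainder is
{ requested on later passes of the WHILE loop.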

          mmp$assign_page_frame (xsva, aste_p, remaining_pages_to_assign, 0, assigned_pages_count, first_pfti,
                pstatus);

          IF assigned_pages_count > 0 THEN
            next_pfti := first_pfti;
            psva := xsva;
            WHILE next_pfti <> 0 DO

{ Preset pages if necessary; presetting can only be done one page at a time.  If the caller specified
{ preset, then mark the page modified so that it will be written to disk if it ages out of the working
{ set.  Then if it is referenced the user will get the preset page.  If the preset is being done for
{ security on the assignment of new pages to memory (to prevent a user from being able to see what had
{ previously been on the page), then the modified bit does not need to be set.  If the page ages out
{ of memory, it will not be written to disk.  NOTE:  some pages may have been assigned even if a
{ page table full condition was encountered.

              IF (rb.preset_pages) OR (ste_p^.ste.r2 > 3) OR (fde_p^.stack_for_ring > 3) THEN
                mmp$preset_real_memory (psva, fde_p^.preset_value);
                IF rb.preset_pages THEN
                  mmv$pt_p^ [mmv$pft_p^ [next_pfti].pti].m := TRUE;
                IFEND;
                psva.offset := psva.offset + osv$page_size;
              IFEND;

              mmv$pt_p^ [mmv$pft_p^ [next_pfti].pti].v := TRUE;
              next_pfti := mmv$pft_p^ [next_pfti].link.bkw;
            WHILEND;
            requested_page_count := requested_page_count - assigned_pages_count;
          IFEND;

          IF pstatus = ps_no_memory THEN
            mtp$error_stop ('MM - NO MEMORY IN ASSIGN PAGES');
          ELSEIF pstatus = ps_pt_full THEN
            mtp$set_status_abnormal ('MM', mme$page_table_full, rb.status);
            RETURN; {----->
          IFEND;

          xsva.offset := xsva.offset + (assigned_pages_count * osv$page_size);
        IFEND;
      WHILEND;
    ELSEIF ijle_p^.statistics.tasks_not_in_long_wait = 1 THEN

{ Memory is not available and no other tasks are ready, so cause the job to swap out.
{ Free the pages the job already has and request the total the job wants.  The requested
{ memory will be 'reserved' for the job when it swaps back in.

      mmp$initialize_find_next_pfti (sva, rb.length, exclude_partial_pages, psc_nominal_queue, aste_p, pfti);

      WHILE pfti <> 0 DO
        mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := FALSE;
        mmp$delete_pt_entry (pfti, TRUE);
        mmp$relink_page_frame (pfti, mmc$pq_free);
        mmp$find_next_pfti (pfti);
      WHILEND;
      mmp$sva_purge_all_page_map (sva);

      mtp$set_status_abnormal ('MM', mme$memory_not_avail_for_assign, rb.status);
      IF rb.waitopt = osc$wait THEN
        IF ijle_p^.entry_status <> jmc$ies_job_in_memory_non_swap THEN
          ijle_p^.memory_reserve_request.requested_page_count := requested_page_count;
          ijle_p^.memory_reserve_request.swapout_job := TRUE;
          jmp$set_scheduler_event (jmc$swap_job_for_memory_reserve);
          cst_p^.dispatch_control.call_dispatcher := TRUE;
        ELSE
          mtp$set_status_abnormal ('MM', mme$cannot_wait_for_memory, rb.status);
        IFEND;
      IFEND;
    ELSE

{ Memory is not available but other tasks are ready; set status so that this task will do
{ a short wait before reissuing the assign_pages request.  This lets the other tasks go
{ idle before the job is swapped out to wait until memory can be found to honor the assign request.

      mtp$set_status_abnormal ('MM', mme$wait_so_other_tasks_can_run, rb.status);
    IFEND;

{ If the job had been forced to swap out and wait for memory to be reserved for it, adjust
{ the reserved counts now.

    IF ijle_p^.memory_reserve_request.reserved_page_count > 0 THEN
      mmv$reserved_page_count := mmv$reserved_page_count - ijle_p^.memory_reserve_request.reserved_page_count;
      ijle_p^.memory_reserve_request.reserved_page_count := 0;
    IFEND;

  PROCEND mmp$process_assign_pages;

?? TITLE := 'MMP$PROCESS_CANCEL_RESERVE', EJECT ??

  PROCEDURE mmp$process_cancel_reserve
    (    rb: mmt$rb_assign_pages;
         cst_p: ^ost$cpu_state_table);

    VAR
      ijle_p: ^jmt$initiated_job_list_entry;


    ijle_p := cst_p^.ijle_p;

    ijle_p^.memory_reserve_request.swapout_job := FALSE;
    ijle_p^.memory_reserve_request.requested_page_count := 0;
    mmv$reserved_page_count := mmv$reserved_page_count - ijle_p^.memory_reserve_request.reserved_page_count;
    ijle_p^.memory_reserve_request.reserved_page_count := 0;

  PROCEND mmp$process_cancel_reserve;

?? TITLE := 'MMP$PROCESS_MOVE_PAGES_REQUEST', EJECT ??

{ The purpose of this monitor request is to move a page frame from one PVA to another.
{ When the request completes, all pages in the range from <pva_source> to <pva_source>+
{ <length-1> will have been moved to the range of addresses specified by <pva_destination>
{ to <pva_destination>+<length-1>.
{
{ CAUTION:  Be sure to fully understand how the 'move' is accomplished before changing this
{           procedure.  Because mmp$delete_pt_entry USES information and mmp$make_pt_entry
{           CHANGES information in the page frame table entry, it is necessary to 'move' the
{           page in the following order:
{             1.  Delete the source page table entry.
{             2.  Change the page frame table entry to reflect destination page information.
{             3.  Make the page table entry for the destination page.
{             4.  If necessary, relink the page frame to the queue for the destination segment.
{             5.  Set the valid bit on the destination page.

  PROCEDURE [XDCL] mmp$process_move_pages_request
    (VAR rb: mmt$rb_move_pages;
         cst_p: ^ost$cpu_state_table);

    VAR
      count: 1 .. 32,
      destination_aste_p: ^mmt$active_segment_table_entry,
      destination_fde_p: gft$locked_file_desc_entry_p,
      destination_pfti: mmt$page_frame_index,
      destination_pti: integer,
      destination_ste_p: ^mmt$segment_descriptor,
      destination_stxe_p: ^mmt$segment_descriptor_extended,
      destination_sva: ost$system_virtual_address,
      found: boolean,
      i: integer,
      job_ijle_p: ^jmt$initiated_job_list_entry,
      mpt_status: mmt$make_pt_entry_status,
      modified: boolean,
      number_of_pages_to_move: mmt$move_pages_page_count,
      page_status: gft$page_status,
      pfte_p: ^mmt$page_frame_table_entry,
      pfti: mmt$page_frame_index,
      pte_p: ^ost$page_table_entry,
      reject_offset: ost$segment_offset,
      source_aste_p: ^mmt$active_segment_table_entry,
      source_fde_p: gft$locked_file_desc_entry_p,
      source_pti: integer,
      source_ste_p: ^mmt$segment_descriptor,
      source_stxe_p: ^mmt$segment_descriptor_extended,
      source_sva: ost$system_virtual_address,
      status: syt$monitor_status,
      sva_of_last_page: ost$system_virtual_address,
      system_ijle_p: ^jmt$initiated_job_list_entry;


    rb.status.normal := TRUE;

    mmp$verify_pva (^rb.pva_source, mmc$sat_read_or_write, rb.status);
    IF NOT rb.status.normal THEN
      RETURN; {----->
    IFEND;

    mmp$convert_pva (rb.pva_source, cst_p, source_sva, source_fde_p, source_aste_p, source_ste_p,
          source_stxe_p);
    IF source_aste_p = NIL THEN
      mtp$set_status_abnormal ('MM', mme$invalid_pva, rb.status);
      RETURN; {----->
    IFEND;

    mmp$verify_pva (^rb.pva_destination, mmc$sat_write, rb.status);
    IF NOT rb.status.normal THEN
      RETURN; {----->
    IFEND;

    mmp$convert_pva (rb.pva_destination, cst_p, destination_sva, destination_fde_p, destination_aste_p,
          destination_ste_p, destination_stxe_p);
    IF destination_aste_p = NIL THEN
      mtp$set_status_abnormal ('MM', mme$invalid_pva, rb.status);
      RETURN; {----->
    IFEND;

    IF (rb.length <= 0) OR (rb.length > mmc$move_pages_max_req_length) THEN
      mtp$set_status_abnormal ('MM', mme$invalid_length_requested, rb.status);
      RETURN; {----->
    IFEND;

    IF (rb.length MOD osv$page_size <> 0) THEN
      mtp$set_status_abnormal ('MM', mme$length_not_page_size_mult, rb.status);
      RETURN; {----->
    IFEND;

    IF (source_sva.offset MOD osv$page_size <> 0) OR (destination_sva.offset MOD osv$page_size <> 0) THEN
      mtp$set_status_abnormal ('MM', mme$pva_not_on_page_boundary, rb.status);
      RETURN; {----->
    IFEND;

    IF (source_sva.offset + rb.length > osc$max_segment_length) OR
          (destination_sva.offset + rb.length > osc$max_segment_length) THEN
      mtp$set_status_abnormal ('MM', mme$invalid_pva_formed, rb.status);
      RETURN; {----->
    IFEND;

    job_ijle_p := jmf$ijle_p (cst_p^.ijl_ordinal);
    system_ijle_p := jmf$ijle_p (jmv$system_ijl_ordinal);

    rb.moved_modified_page_count := 0;
    rb.number_of_pages_moved := 0;

    number_of_pages_to_move := rb.length DIV osv$page_size;

    sva_of_last_page.offset := destination_sva.offset + rb.length;

    IF (destination_sva.offset + number_of_pages_to_move * osv$page_size) > destination_fde_p^.file_limit THEN
      mtp$set_status_abnormal ('MM', mme$read_write_beyond_msl, rb.status);
      RETURN; {----->
    IFEND;

    IF (destination_aste_p^.queue_id = mmc$pq_wired) OR (destination_aste_p^.queue_id = mmc$pq_job_fixed) THEN
      mtp$set_status_abnormal ('MM', mme$wired_or_fixed_segs_illegal, rb.status);
      RETURN; {----->
    IFEND;

    CASE destination_fde_p^.media OF
    = gfc$fm_transient_segment =
      IF (destination_aste_p^.pages_in_memory + number_of_pages_to_move > mmv$max_pages_no_file) THEN
        page_status := gfc$ps_job_mode_work_required;
        reject_offset := destination_sva.offset + (number_of_pages_to_move * osv$page_size);
      ELSE
        page_status := gfc$ps_page_doesnt_exist;
      IFEND;
    = gfc$fm_mass_storage_file =
      mmv$last_segment_accessed := #SEGMENT (rb.pva_destination);
      dmp$fetch_multi_page_status (destination_fde_p, destination_sva.offset,
            number_of_pages_to_move * osv$page_size, destination_stxe_p^.file_limits_enforced, reject_offset,
            page_status);
    = gfc$fm_served_file =
      dfp$fetch_multi_page_status (destination_fde_p, destination_sva.offset,
            number_of_pages_to_move * osv$page_size, page_status);
      reject_offset := destination_sva.offset + (number_of_pages_to_move - 1) * osv$page_size;
    CASEND;


    CASE page_status OF
    = gfc$ps_page_on_disk, gfc$ps_page_on_server, gfc$ps_page_doesnt_exist =

{ These are ok; do nothing

    = gfc$ps_temp_reject, gfc$ps_account_limit_exceeded =
      mtp$set_status_abnormal ('MM', mme$temporary_reject, rb.status);
      tmp$cause_task_switch;
      RETURN; {----->

    = gfc$ps_volume_unavailable =
      mtp$set_status_abnormal ('MM', mme$volume_unavailable, rb.status);
      RETURN; {----->

    = gfc$ps_server_terminated =
      mtp$set_status_abnormal ('DF', dfe$server_has_terminated, rb.status);
      RETURN; {----->

    = gfc$ps_server_allocate_required, gfc$ps_job_mode_work_required =
      set_assign_active (destination_stxe_p, reject_offset);
      mmp$update_eoi (destination_fde_p, reject_offset, mmc$uer_page_assigned);
      tmp$set_monitor_flag (cst_p^.taskid, mmc$mf_segment_mgr_flag, rb.status);
      mtp$set_status_abnormal ('MM', mme$dm_assign_active, rb.status);
      RETURN; {----->

    ELSE
      mtp$error_stop ('mm - unexpected DM error in move pages');
    CASEND;


  /move_page/
    FOR i := 1 TO number_of_pages_to_move DO
      #HASH_SVA (source_sva, source_pti, count, found);
      IF NOT found THEN
        mtp$set_status_abnormal ('MM', mme$source_page_not_in_memory, rb.status);
        RETURN; {----->
      IFEND;

      pte_p := ^mmv$pt_p^ [source_pti];
      pfti := (pte_p^.rma * 512) DIV osv$page_size;
      pfte_p := ^mmv$pft_p^ [pfti];

      IF ((pfte_p^.active_io_count > 0) AND (pfte_p^.locked_page <> mmc$lp_page_in_lock)) THEN
        mtp$set_status_abnormal ('MM', mme$io_active_on_move_page, rb.status);
        RETURN; {----->

      ELSEIF (pte_p^.m) THEN
        IF (rb.reject_move_if_source_modified) THEN
          mtp$set_status_abnormal ('MM', mme$modified_source_page_reject, rb.status);
          RETURN; {----->
        ELSE
          rb.moved_modified_page_count := rb.moved_modified_page_count + 1;
        IFEND;
      IFEND;

{ Determine the value for modified_bit_option here; if no_change, the status of the modified bit
{ on the source page must be captured before the page table entry is deleted.

      IF rb.modified_bit_option = mmc$mp_set_modified THEN
        modified := TRUE;
      ELSEIF rb.modified_bit_option = mmc$mp_clear_modified THEN
        modified := FALSE;
      ELSE
        modified := pte_p^.m;
      IFEND;

{ Delete the source page

      mmp$delete_pt_entry (pfti, TRUE);

{ The destination page should not be in the page table; if it is, delete it.

      #HASH_SVA (destination_sva, destination_pti, count, found);
      IF found THEN
        destination_pfti := (mmv$pt_p^ [destination_pti].rma * 512) DIV osv$page_size;
        mmp$delete_pt_entry (destination_pfti, TRUE);
        mmp$relink_page_frame (destination_pfti, mmc$pq_free);
      IFEND;

{ Change the page frame table entry to the destination page information.

      pfte_p^.aste_p := destination_aste_p;
      pfte_p^.sva := destination_sva;

{ Make the page table entry for the destination page.  If the page table is full, restore the source page
{ (the restore cannot fail) and return.  Job mode will reissue the request, starting after the last page
{ that was moved.
{ NOTE:  make_pt_entry sets the USED bit and stores the ASID and RMA in the page table entry; it
{        also stores the PTI in the page frame table entry and increments the PAGES_IN_MEMORY count
{        in the active segment table entry.  Make_pt_entry does NOT set the valid bit in the
{        page table entry.


      mmp$make_pt_entry (destination_sva, pfti, destination_aste_p, pfte_p, mpt_status);
      IF mpt_status = mmc$mpt_page_table_full THEN
        mmv$async_work.pt_full_aste_p := destination_aste_p;
        mmv$async_work.pt_full_sva := destination_sva;
        mmv$async_work.pt_full := TRUE;
        mmv$time_to_call_mem_mgr := 0;
        osv$time_to_check_asyn := 0;
        pfte_p^.aste_p := source_aste_p;
        pfte_p^.sva := source_sva;
        mmp$make_pt_entry (source_sva, pfti, source_aste_p, pfte_p, mpt_status);
        IF mpt_status <> mmc$mpt_done THEN
          mtp$error_stop ('MOVE_PAGES -- COULD NOT REMAKE PAGE TABLE ENTRY');
        IFEND;
        mmv$pt_p^ [pfte_p^.pti].v := TRUE;
        mtp$set_status_abnormal ('MM', mme$page_table_full, rb.status);
        RETURN; {----->
      IFEND;

{ Store the correct ijl ordinal in the page frame table entry.  If the queue the source
{ page was in is not the same as the queue for the destination segment, relink the page.
{ If the page is going from the shared queue to a job working set, the pft.ijl_ordinal
{ must be changed and then the page relinked.  If the page is going from a job working
{ set to the shared queue, the page must be relinked and then the pft.ijl_ordinal
{ changed.  Adjust active_io_counts if the page changes queues.
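{ (The required order is evidently because relink_page_frame locates a
{ job's queues through pft.ijl_ordinal: the ordinal must identify the
{ owning job while the page is on, or being linked onto, a job queue,
{ whereas the shared queues are global and need no ordinal.)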

      IF destination_aste_p^.queue_id >= mmc$pq_job_base THEN
        IF pfte_p^.queue_id >= mmc$pq_job_base THEN { MUST be the same JWS }
          IF pfte_p^.ijl_ordinal <> destination_aste_p^.ijl_ordinal THEN
            mtp$error_stop ('MM - MOVE PAGES - jws to jws move');
          IFEND;
        ELSE { shared queue to job working set }
          pfte_p^.ijl_ordinal := cst_p^.ijl_ordinal;
          mmp$relink_page_frame (pfti, destination_aste_p^.queue_id);
          job_ijle_p^.inhibit_swap_count := job_ijle_p^.inhibit_swap_count + pfte_p^.active_io_count;
          job_ijle_p^.active_io_page_count := job_ijle_p^.active_io_page_count + pfte_p^.active_io_count;
          system_ijle_p^.active_io_page_count := system_ijle_p^.active_io_page_count -
                pfte_p^.active_io_count;
        IFEND;
      ELSE { destination is the shared queue }
        IF pfte_p^.queue_id >= mmc$pq_job_base THEN { job working set to shared queue }
          mmp$relink_page_frame (pfti, destination_aste_p^.queue_id);
          pfte_p^.ijl_ordinal := jmv$system_ijl_ordinal;
          job_ijle_p^.inhibit_swap_count := job_ijle_p^.inhibit_swap_count - pfte_p^.active_io_count;
          job_ijle_p^.active_io_page_count := job_ijle_p^.active_io_page_count - pfte_p^.active_io_count;
          system_ijle_p^.active_io_page_count := system_ijle_p^.active_io_page_count +
                pfte_p^.active_io_count;
        IFEND;
      IFEND;

      mmv$pt_p^ [pfte_p^.pti].v := TRUE;
      mmv$pt_p^ [pfte_p^.pti].m := modified;

      rb.number_of_pages_moved := rb.number_of_pages_moved + 1;

      IF (rb.number_of_pages_moved < number_of_pages_to_move) THEN
        source_sva.offset := source_sva.offset + osv$page_size;
        destination_sva.offset := destination_sva.offset + osv$page_size;
      IFEND;

    FOREND /move_page/;

    IF mmv$multiple_caches OR mmv$multiple_page_maps THEN
      mmp$purge_all_cache_map_proc;
    ELSE
      mmp$sva_purge_all_cache (destination_sva);
      mmp$purge_all_map_proc;
    IFEND;

  PROCEND mmp$process_move_pages_request;

?? TITLE := 'MMP$PROCESS_ASSIGN_CONTIG_MEM', EJECT ??

  PROCEDURE [XDCL] mmp$process_assign_contig_mem
    (VAR rb: mmt$rb_assign_contig_memory;
         cst_p: ^ost$cpu_state_table);

    VAR
      assign_contiguous: boolean,
      assigned_pages: mmt$page_frame_index,
      aste_p: ^mmt$active_segment_table_entry,
      count: 1 .. 32,
      fde_p: gft$locked_file_desc_entry_p,
      first_pfti: mmt$page_frame_index,
      found: boolean,
      ignore_relink_status: mmt$relink_page_status,
      ijl_p: ^jmt$initiated_job_list_entry,
      index: integer,
      inhibit_io: boolean,
      io_id: mmt$io_identifier,
      ipti: integer,
      mcount: integer,
      pages_requested: 0 .. 0ffff(16),
      pfti: integer,
      pstatus: mmt$page_pull_status,
      qcb_p: ^mmt$page_queue_list_entry,
      rcount: integer,
      save_pfti: integer,
      starting_pfti: integer,
      ste_p: ^mmt$segment_descriptor,
      stxe_p: ^mmt$segment_descriptor_extended,
      sva: ost$system_virtual_address,
      test_sva: ost$system_virtual_address,
      write_status: mmt$write_page_to_disk_status;



    rb.status.normal := TRUE;
    mmp$verify_pva (^rb.process_virtual_address, mmc$sat_write, rb.status);
    IF NOT rb.status.normal THEN
      RETURN; {----->
    IFEND;

    mmp$convert_pva (rb.process_virtual_address, cst_p, sva, fde_p, aste_p, ste_p, stxe_p);
    pages_requested := (#OFFSET (rb.process_virtual_address) + rb.requested_length - 1) DIV
          osv$page_size - (#OFFSET (rb.process_virtual_address) DIV osv$page_size) + 1;
    test_sva.asid := sva.asid;
    test_sva.offset := ((sva.offset DIV osv$page_size) * osv$page_size);
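
{ Example (assuming a 2048-byte page for illustration): a request at byte
{ offset 3000 with length 2000 spans bytes 3000..4999, so the formula gives
{ (4999 DIV 2048) - (3000 DIV 2048) + 1 = 2 - 1 + 1 = 2 pages, and test_sva
{ begins at the page-aligned base of the first page (offset 2048).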

{Verify that none of the pages are currently assigned.

    FOR index := 1 TO pages_requested DO
      #HASH_SVA (test_sva, ipti, count, found);
      IF found THEN
        mtp$set_status_abnormal ('MM', mme$pages_already_assigned, rb.status);
        RETURN; {----->
      IFEND;
      test_sva.offset := test_sva.offset + osv$page_size;
    FOREND;


    CASE rb.pass_count OF
    = mmc$scan_pft_for_free_or_avail, mmc$scan_pft_free_avail_notmod =
      IF rb.pass_count = mmc$scan_pft_for_free_or_avail THEN
        mmv$assign_contiguous_pass_cnt.pass_one_count := mmv$assign_contiguous_pass_cnt.pass_one_count + 1;
      ELSE
        mmv$assign_contiguous_pass_cnt.pass_two_count := mmv$assign_contiguous_pass_cnt.pass_two_count + 1;
      IFEND;
      starting_pfti := 0;
      scan_pft_for_pages (rb.pass_count, pages_requested, starting_pfti);

      IF (starting_pfti = 0) THEN
        mtp$set_status_abnormal ('MM', mme$unable_to_assign_contig_mem, rb.status);
        RETURN; {----->
      ELSEIF rb.pass_count = mmc$scan_pft_free_avail_notmod THEN

{ We have successfully found the requested pages.  A second scan
{ of these pages is required: if a page is still usable, we go
{ ahead and remove it from the job working set.

      /loop/
        WHILE starting_pfti <> 0 DO
          pfti := starting_pfti;
          assign_contiguous := TRUE;

        /verify_pages_removable/
          FOR index := 1 TO pages_requested DO
            IF (mmv$pft_p^ [pfti].queue_id = mmc$pq_avail) OR
                  ((mmv$pft_p^ [pfti].queue_id = mmc$pq_free) AND (mmv$pft_p^ [pfti].active_io_count = 0))
                  THEN
              pfti := pfti + 1;
              CYCLE /verify_pages_removable/; {----->
            ELSE
              IF ((mmv$pft_p^ [pfti].queue_id >= mmc$pq_shared_first) AND
                    (mmv$pft_p^ [pfti].queue_id <= mmc$pq_shared_last)) OR
                    (mmv$pft_p^ [pfti].queue_id = mmc$pq_job_working_set) THEN
                IF mmv$pft_p^ [pfti].locked_page = mmc$lp_not_locked THEN
                  IF (NOT mmv$pt_p^ [mmv$pft_p^ [pfti].pti].m) AND (mmv$pft_p^ [pfti].active_io_count =
                        0) THEN
                    mmp$get_inhibit_io_status (mmv$pft_p^ [pfti].ijl_ordinal, TRUE {lock ajl} , inhibit_io,
                          ijl_p);
                    IF NOT inhibit_io THEN
                      mmp$remove_page_from_jws (pfti, ijl_p, FALSE {= relink when Avail Mod Q max} , mcount,
                            rcount, ignore_relink_status);
                      pfti := pfti + 1;
                      jmp$unlock_ajl (ijl_p);
                      CYCLE /verify_pages_removable/; {----->
                    IFEND;
                  IFEND;
                IFEND;
              IFEND;
            IFEND;
            assign_contiguous := FALSE;
            EXIT /verify_pages_removable/; {----->
          FOREND /verify_pages_removable/;

          IF assign_contiguous THEN
            EXIT /loop/; {----->
          ELSE
            starting_pfti := pfti;
            scan_pft_for_pages (rb.pass_count, pages_requested, starting_pfti);
          IFEND;
        WHILEND /loop/;
      IFEND;

      IF starting_pfti = 0 THEN
        mtp$set_status_abnormal ('MM', mme$unable_to_assign_contig_mem, rb.status);
        RETURN; {----->
      IFEND;

      pfti := starting_pfti;
      FOR index := 1 TO pages_requested DO
        IF mmv$pft_p^ [pfti].queue_id = mmc$pq_avail THEN
          mmp$delete_pt_entry (pfti, TRUE);
          mmp$relink_page_frame (pfti, mmc$pq_free);
        IFEND;
        pfti := pfti + 1;
      FOREND;

      mmp$assign_page_frame (sva, aste_p, pages_requested, starting_pfti, assigned_pages, first_pfti,
            pstatus);

{ If the number of pages assigned does not equal the number requested (usually because the page table
{ is full), free any pages assigned and cause the request to be reissued.

      IF pages_requested <> assigned_pages THEN
        pfti := first_pfti;
        IF assigned_pages > 0 THEN
          WHILE pfti <> 0 DO
            mmp$delete_pt_entry (pfti, TRUE);
            save_pfti := mmv$pft_p^ [pfti].link.bkw;
            mmp$relink_page_frame (pfti, mmc$pq_free);
            pfti := save_pfti;
          WHILEND;
        IFEND;
        cst_p^.dispatch_control.asynchronous_interrupts_pending := TRUE;
        tmp$cause_task_switch;
        tmp$reissue_monitor_request;
        mmv$assign_contig_reject := mmv$assign_contig_reject + 1;
        RETURN; {----->
      IFEND;

{ Each of the pages assigned must be preset. After the page is preset,
{ the valid bit in the page table is set.

      WHILE starting_pfti <> 0 DO
        mmp$preset_real_memory (mmv$pft_p^ [starting_pfti].sva, fde_p^.preset_value);
        mmv$pt_p^ [mmv$pft_p^ [starting_pfti].pti].v := TRUE;
        starting_pfti := mmv$pft_p^ [starting_pfti].link.bkw;
      WHILEND;

{ If the pages were assigned to a job-fixed segment, the pages
{ must be moved from the end of the job-fixed page queue to the
{ beginning of that queue.  Moving the pages makes swapping and
{ job recovery possible for jobs with contiguous pages assigned.
{ A count of the job-fixed contiguous pages is maintained in the
{ IJL entry of the job.
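{ (Reading the link updates below: the newly assigned frames form a run
{ at the tail end of the doubly linked job-fixed queue, with first_pfti
{ nearest the head; the run is detached by making its head-side neighbor
{ the new tail, then reattached ahead of the old head so that first_pfti
{ becomes the new head.  The splice is constant time; no traversal of
{ the queue is needed.)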

      IF mmv$pft_p^ [first_pfti].queue_id = mmc$pq_job_fixed THEN
        ijl_p := jmf$ijle_p (mmv$pft_p^ [first_pfti].ijl_ordinal);
        qcb_p := ^ijl_p^.job_page_queue_list [mmc$pq_job_fixed];
        mmv$pft_p^ [qcb_p^.link.bkw].link.fwd := qcb_p^.link.fwd;
        save_pfti := qcb_p^.link.fwd;
        qcb_p^.link.fwd := mmv$pft_p^ [first_pfti].link.fwd;
        mmv$pft_p^ [first_pfti].link.fwd := 0;
        mmv$pft_p^ [qcb_p^.link.fwd].link.bkw := 0;
        mmv$pft_p^ [save_pfti].link.bkw := qcb_p^.link.bkw;
        qcb_p^.link.bkw := first_pfti;
        ijl_p^.job_fixed_contiguous_pages := ijl_p^.job_fixed_contiguous_pages + pages_requested;
      IFEND;

{ The following global variable maintains a count of all of the contiguous
{ pages currently assigned in the system. This count includes both wired and
{ job-fixed contiguous pages assigned.

      total_contig_pages_assigned := total_contig_pages_assigned + pages_requested;

    = mmc$scan_pft_write_mod_pages =

{ Pass three simply scans the page frame table and writes to disk
{ any page in a job working set or the shared queue that is not
{ locked, provided the swap status of the job does not prohibit
{ writing the page to disk.

      io_id.specified := FALSE;

{write pages to disk

      FOR pfti := UPPERBOUND (mmv$pft_p^) DOWNTO LOWERBOUND (mmv$pft_p^) DO
        IF ((mmv$pft_p^ [pfti].queue_id >= mmc$pq_shared_first) AND
              (mmv$pft_p^ [pfti].queue_id <= mmc$pq_shared_last)) OR
              (mmv$pft_p^ [pfti].queue_id = mmc$pq_job_working_set) THEN
          IF mmv$pft_p^ [pfti].locked_page = mmc$lp_not_locked THEN
            mmp$get_inhibit_io_status (mmv$pft_p^ [pfti].ijl_ordinal, TRUE {lock ajl} , inhibit_io, ijl_p);
            IF NOT inhibit_io THEN
              gfp$mtr_get_locked_fde_p (mmv$pft_p^ [pfti].aste_p^.sfid, ijl_p, fde_p);
              mmp$write_page_to_disk (fde_p, pfti, ioc$write_page, io_id, mmv$multi_page_write, write_status);
              jmp$unlock_ajl (ijl_p);
            IFEND;
          IFEND;
        IFEND;
      FOREND;
      mmv$assign_contiguous_pass_cnt.pass_three_count := mmv$assign_contiguous_pass_cnt.pass_three_count + 1;
    ELSE
      mtp$set_status_abnormal ('MM', mme$invalid_request, rb.status);
    CASEND;

  PROCEND mmp$process_assign_contig_mem;
?? TITLE := 'SCAN_PFT_FOR_PAGES', EJECT ??

  PROCEDURE scan_pft_for_pages
    (    pass_count: mmt$assign_contig_pass_ident;
         pages_requested: 0 .. 0ffff(16);
     VAR starting_pfti: integer);

    VAR
      ijl_p: ^jmt$initiated_job_list_entry,
      job_found: boolean,
      inhibit_io: boolean,
      page_count,
      pfti: integer;

    IF starting_pfti = 0 THEN
      IF mmv$image_file.active THEN
        pfti := (osv$180_memory_limits.deadstart_upper DIV osv$page_size) - 1;
      ELSE
        pfti := UPPERBOUND (mmv$pft_p^) + 1;
      IFEND;
    ELSE
      pfti := starting_pfti;
    IFEND;
    page_count := 0;

{ Passes one and two check for free or available pages.
{ In addition to searching for free or available pages, pass two
{ searches for pages it can remove from a job working set.
{ Pages are not removed from a job working set until we are
{ reasonably sure we can assign the requested number of
{ contiguous pages.
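{ The scan runs downward from the top of the page frame table; page_count
{ is the length of the current contiguous run of usable frames and is
{ reset to zero whenever an unusable frame breaks the run.  On success,
{ starting_pfti is returned as the lowest frame index of a run of
{ pages_requested usable frames.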

  /search_loop/
    WHILE (pfti > LOWERBOUND (mmv$pft_p^)) AND (page_count < pages_requested) DO
      pfti := pfti - 1;

{Both passes check first for free or available pages.

      IF (mmv$pft_p^ [pfti].queue_id = mmc$pq_avail) OR ((mmv$pft_p^ [pfti].queue_id = mmc$pq_free) AND
            (mmv$pft_p^ [pfti].active_io_count = 0)) THEN
        page_count := page_count + 1;
        CYCLE /search_loop/; {----->
      ELSE
        IF pass_count = mmc$scan_pft_free_avail_notmod THEN
          IF ((mmv$pft_p^ [pfti].queue_id >= mmc$pq_shared_first) AND
                (mmv$pft_p^ [pfti].queue_id <= mmc$pq_shared_last)) OR
                (mmv$pft_p^ [pfti].queue_id = mmc$pq_job_working_set) THEN
            IF mmv$pft_p^ [pfti].locked_page = mmc$lp_not_locked THEN
              IF (NOT mmv$pt_p^ [mmv$pft_p^ [pfti].pti].m) AND (mmv$pft_p^ [pfti].active_io_count = 0) THEN
                mmp$get_inhibit_io_status (mmv$pft_p^ [pfti].ijl_ordinal, FALSE {lock ajl} , inhibit_io,
                      ijl_p);
                IF NOT inhibit_io THEN
                  page_count := page_count + 1;
                  CYCLE /search_loop/; {----->
                IFEND;
              IFEND;
            IFEND;
          IFEND;
        ELSE

{ If the swap status of the job is swap resident, the job's resources can be freed.
{ The page must be in the job working set--pages in other queues are not necessarily freed (e.g. wired).

          ijl_p := jmf$ijle_p (mmv$pft_p^ [pfti].ijl_ordinal);
          IF (ijl_p^.swap_status = jmc$iss_swapped_io_complete) AND
                (mmv$pft_p^ [pfti].queue_id = mmc$pq_job_working_set) THEN

{swap status is swap resident

            jsp$free_swapped_jobs_memory (mmv$pft_p^ [pfti].ijl_ordinal, {S2_QUEUE_ONLY} TRUE, job_found);
            page_count := page_count + 1;
            CYCLE /search_loop/; {----->
          IFEND;
        IFEND;
      IFEND;
      page_count := 0;
    WHILEND /search_loop/;

    IF page_count < pages_requested THEN
      starting_pfti := 0;
    ELSE
      starting_pfti := pfti;
    IFEND;

  PROCEND scan_pft_for_pages;

?? TITLE := 'MMP$ADVISE_REQUEST_PROCESSOR', EJECT ??

{--------------------------------------------------------------------------------------------------------
{Name:
{  mmp$advise_request_processor
{Purpose:
{  This procedure processes ADVISE requests from job mode.
{Input:
{  rb - request block from job mode
{Output:
{  none
{Error Codes:
{  none
{Notes:
{  - No error is generated if some (or all) of the pages are
{    already assigned.
{--------------------------------------------------------------------------------------------------------

  PROCEDURE [XDCL] mmp$advise_request_processor
    (VAR rb: mmt$rb_advise;
         cst_p: ^ost$cpu_state_table);

    VAR
      avail_mod_queue_overrun: boolean,
      fde_p: gft$locked_file_desc_entry_p,
      ijle_p: ^jmt$initiated_job_list_entry,
      ste_p: ^mmt$segment_descriptor,
      stxe_p: ^mmt$segment_descriptor_extended,
      io_id: mmt$io_identifier,
      check_aio_slowdown: boolean,
      cptime: ost$cp_time_value,
      page_count: integer,
      page_in_count: mmt$page_frame_index,
      pfti: mmt$page_frame_index,
      aste_p: ^mmt$active_segment_table_entry,
      pstatus: mmt$page_pull_status,
      sva: ost$system_virtual_address,
      xcb_p: ^ost$execution_control_block;

    rb.status.normal := TRUE;

    IF NOT mmv$tables_initialized THEN
      RETURN; {----->
    IFEND;

    io_id.specified := FALSE;

{Process the ADVISE OUT part of the request.

    IF ((rb.reqcode = syc$rc_advise_out) OR (rb.reqcode = syc$rc_advise_out_in)) AND (rb.out_length > 0) THEN
      mmp$verify_pva (^rb.out_pva, mmc$sat_read_or_write, rb.status);
      IF NOT rb.status.normal THEN
        RETURN; {----->
      IFEND;
      mmp$convert_pva (rb.out_pva, cst_p, sva, fde_p, aste_p, ste_p, stxe_p);
      IF stxe_p^.access_state <> mmc$sas_allow_access THEN
        RETURN; {----->
      IFEND;
      IF (aste_p^.queue_id <> mmc$pq_job_working_set) AND ((aste_p^.queue_id < mmc$pq_shared_first) OR
            (aste_p^.queue_id > mmc$pq_shared_last)) THEN
        mtp$set_status_abnormal ('MM', mme$segment_not_pageable, rb.status);
        RETURN; {----->
      IFEND;

      IF fde_p^.media = gfc$fm_transient_segment THEN {!Should this be ignored??}
        mtp$set_status_abnormal ('MM', mme$segment_not_assigned_device, rb.status);
        RETURN; {----->
      IFEND;
      mmp$remove_pages_working_set (sva, rb.out_length + #OFFSET (rb.out_pva) - sva.offset, aste_p,
            page_count);
    IFEND;


{Process the ADVISE IN part of the request.

    ijle_p := cst_p^.ijle_p;
    xcb_p := cst_p^.xcb_p;
    IF ((rb.reqcode = syc$rc_advise_in) OR (rb.reqcode = syc$rc_advise_out_in)) AND (rb.in_length > 0) THEN
      mmp$verify_pva (^rb.in_pva, mmc$sat_read_or_write, rb.status);
      IF NOT rb.status.normal THEN
        RETURN; {----->
      IFEND;
      mmp$convert_pva (rb.in_pva, cst_p, sva, fde_p, aste_p, ste_p, stxe_p);
      IF stxe_p^.access_state <> mmc$sas_allow_access THEN
        RETURN; {----->
      IFEND;
      page_count := (#OFFSET (rb.in_pva) + rb.in_length - 1) DIV osv$page_size -
            (#OFFSET (rb.in_pva) DIV osv$page_size) + 1;

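{ Pages are pulled only while the count of reassignable page frames
{ remains at or above the level-2 aggressive-aging threshold; an ADVISE
{ IN therefore stops pulling pages once free memory drops below that
{ level.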
    /advise_in/
      WHILE mmv$reassignable_page_frames.now >= mmv$aggressive_aging_level_2 DO
        mmp$page_pull_hash_sva (sva, aste_p, page_in_count, pstatus, pfti);
        IF page_in_count = 0 THEN
          mmp$page_pull (sva, fde_p, cst_p, aste_p, stxe_p, io_id, page_count, ioc$read_page, TRUE,
                page_in_count, pstatus, pfti);
        IFEND;
        page_count := page_count - page_in_count;
        CASE pstatus OF
        = ps_no_memory, ps_low_on_memory =
          tmp$cause_task_switch;
          EXIT /advise_in/; {----->
        = ps_io_temp_reject =
          tmp$cause_task_switch;
          EXIT /advise_in/; {----->
        = ps_pt_full =
          cst_p^.dispatch_control.asynchronous_interrupts_pending := TRUE;
          tmp$cause_task_switch;
          EXIT /advise_in/; {----->
        = ps_read_beyond_eoi =
          mtp$set_status_abnormal ('MM', mme$read_beyond_eoi, rb.status);
          EXIT /advise_in/; {----->
        = ps_beyond_file_limit =
          mtp$set_status_abnormal ('MM', mme$read_write_beyond_msl, rb.status);
          EXIT /advise_in/; {----->
        = ps_no_extend_permission =
          mtp$set_status_abnormal ('MM', mme$write_beyond_eoi_no_append, rb.status);
          EXIT /advise_in/; {----->
        = ps_volume_unavailable, ps_server_terminated, ps_job_work_required =
          EXIT /advise_in/; {----->
        = ps_allocate_required_on_server, ps_new_page_assigned =
          ijle_p^.statistics.paging_statistics.new_pages_assigned :=
                ijle_p^.statistics.paging_statistics.new_pages_assigned + page_in_count;
          xcb_p^.paging_statistics.new_pages_assigned := xcb_p^.paging_statistics.new_pages_assigned +
                page_in_count;
          mmv$paging_statistics.ai_pages.new := mmv$paging_statistics.ai_pages.new + page_in_count;
          IF pstatus = ps_allocate_required_on_server THEN
            EXIT /advise_in/; {----->
          IFEND;
        = ps_found_on_server =
          ijle_p^.statistics.paging_statistics.pages_from_server :=
                ijle_p^.statistics.paging_statistics.pages_from_server + page_in_count;
          xcb_p^.paging_statistics.pages_from_server := xcb_p^.paging_statistics.pages_from_server +
                page_in_count;
          mmv$paging_statistics.ai_pages.server := mmv$paging_statistics.ai_pages.server + page_in_count;

        = ps_found_in_avail, ps_found_in_avail_modified =
          ijle_p^.statistics.paging_statistics.pages_reclaimed_from_queue :=
                ijle_p^.statistics.paging_statistics.pages_reclaimed_from_queue + page_in_count;
          xcb_p^.paging_statistics.pages_reclaimed_from_queue :=
                xcb_p^.paging_statistics.pages_reclaimed_from_queue + page_in_count;
          mmv$paging_statistics.ai_pages.reclaim := mmv$paging_statistics.ai_pages.reclaim + page_in_count;

        = ps_found_on_disk =
          ijle_p^.statistics.paging_statistics.page_in_count :=
                ijle_p^.statistics.paging_statistics.page_in_count + page_in_count;
          xcb_p^.paging_statistics.page_in_count := xcb_p^.paging_statistics.page_in_count + page_in_count;
          mmv$paging_statistics.ai_pages.disk := mmv$paging_statistics.ai_pages.disk + page_in_count;
          IF ijle_p^.active_io_requests > mmv$advise_in_aio_limit THEN
            EXIT /advise_in/; {----->
          IFEND;
        ELSE
        CASEND;
        IF page_count <= 0 THEN
          EXIT /advise_in/; {----->
        IFEND;
        sva.offset := sva.offset + (page_in_count * osv$page_size);
      WHILEND /advise_in/;


{Scan the JWS if the job cp time exceeds the aging threshold.

      IF mmv$aging_algorithm >= 4 THEN
        cptime := ijle_p^.statistics.cp_time.time_spent_in_job_mode;
      ELSE
        cptime := ijle_p^.statistics.cp_time.time_spent_in_mtr_mode +
              ijle_p^.statistics.cp_time.time_spent_in_job_mode;
      IFEND;

      IF cptime > cst_p^.jcb_p^.cptime_next_age_working_set THEN
        mmp$age_job_working_set (ijle_p, cst_p^.jcb_p, NOT mmv$wait_on_avail_mod_q_full,
              avail_mod_queue_overrun);
        IF avail_mod_queue_overrun AND (cst_p^.ijl_ordinal <> jmv$system_ijl_ordinal)
{     } AND (NOT xcb_p^.critical_task)
{     } AND (tmv$ptl_p^ [cst_p^.taskid.index].new_task_status = tmc$ts_null) THEN
          tmp$queue_task (cst_p^.taskid, tmc$ts_avail_mod_q_full_wait, mmv$avail_mod_wait_queue);
        IFEND;
      IFEND;

      check_aio_slowdown := (ijle_p^.job_page_queue_list [mmc$pq_job_working_set].count >
            cst_p^.jcb_p^.max_working_set_size);
      IF check_aio_slowdown OR (ijle_p^.job_page_queue_list [mmc$pq_job_working_set].count >
            mmv$max_working_set_size) THEN
        mmp$trim_job_working_set (ijle_p, cst_p^.jcb_p, FALSE {trim_to_swap_size=false} ,
              avail_mod_queue_overrun);
        IF avail_mod_queue_overrun AND (cst_p^.ijl_ordinal <> jmv$system_ijl_ordinal)
{     } AND (NOT xcb_p^.critical_task)
{     } AND (tmv$ptl_p^ [cst_p^.taskid.index].new_task_status = tmc$ts_null) THEN
          tmp$queue_task (cst_p^.taskid, tmc$ts_avail_mod_q_full_wait, mmv$avail_mod_wait_queue);

        ELSEIF check_aio_slowdown AND (ijle_p^.active_io_requests > mmv$maxws_aio_threshold)
{     } AND (tmv$ptl_p^ [cst_p^.taskid.index].new_task_status = tmc$ts_null) THEN
          mmv$maxws_aio_count := mmv$maxws_aio_count + 1;
          xcb_p^.maxws_aio_slowdown := xcb_p^.maxws_aio_slowdown + 1;
          ijle_p^.maxws_aio_slowdown_display := ((mmv$maxws_aio_slowdown DIV mmv$jws_queue_age_interval) +
                1) MOD 256;
          tmp$cause_task_switch;
        IFEND;

      ELSEIF (ijle_p^.active_io_requests > jmv$service_classes [ijle_p^.job_scheduler_data.service_class]^.
            attributes.aio_limit)
{   } AND (tmv$ptl_p^ [cst_p^.taskid.index].new_task_status = tmc$ts_null) THEN
        tmp$cause_task_switch;
        mmv$aio_limit_count := mmv$aio_limit_count + 1;
      IFEND;

      IF ijle_p^.statistics.paging_statistics.incremental_max_ws < ijle_p^.
            job_page_queue_list [mmc$pq_job_working_set].count THEN
        ijle_p^.statistics.paging_statistics.incremental_max_ws := ijle_p^.
              job_page_queue_list [mmc$pq_job_working_set].count;
        IF ijle_p^.statistics.paging_statistics.working_set_max_used < ijle_p^.
              job_page_queue_list [mmc$pq_job_working_set].count THEN
          ijle_p^.statistics.paging_statistics.working_set_max_used := ijle_p^.
                job_page_queue_list [mmc$pq_job_working_set].count;
        IFEND;
      IFEND;
      IF xcb_p^.paging_statistics.working_set_max_used < ijle_p^.job_page_queue_list [mmc$pq_job_working_set].
            count THEN
        xcb_p^.paging_statistics.working_set_max_used := ijle_p^.job_page_queue_list [mmc$pq_job_working_set].
              count;
      IFEND;

{Free queue must be replenished if the number of free+avail pages is below the threshold.

      check_free_queues (cst_p);

    IFEND; {advise-in processing}

  PROCEND mmp$advise_request_processor;
?? TITLE := 'MMP$ASSIGN_PAGE_TO_MONITOR', EJECT ??

{-------------------------------------------------------------------------

*copyc mmh$assign_page_to_monitor

{-------------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$assign_page_to_monitor
    (    p: ^cell;
         page_count: integer;
         preset: boolean;
     VAR status: syt$monitor_status);

    VAR
      i: integer,
      aste_p: ^mmt$active_segment_table_entry,
      pstatus: mmt$page_pull_status,
      sva: ost$system_virtual_address,
      cell_p: ^cell,
      count: mmt$page_frame_index,
      pfti: mmt$page_frame_index;

    status.normal := TRUE;
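{ Round the byte offset of p down to a page boundary to form the sva
{ offset (for illustration, with a 2048-byte page an offset of 5000
{ rounds down to 4096), then purge the segment cache and pick up the
{ asid of the monitor segment containing p.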
    sva.offset := (#OFFSET (p) DIV osv$page_size) * osv$page_size;
    cell_p := ^mtv$monitor_segment_table;
    #PURGE_BUFFER (osc$pva_purge_segment_cache, cell_p);
    sva.asid := mtv$monitor_segment_table.st [#SEGMENT (p)].ste.asid;

    mmp$aste_pointer (sva.asid, aste_p);
    FOR i := 1 TO page_count DO
      mmp$assign_page_frame (sva, aste_p, 1, 0, count, pfti, pstatus);
      IF pstatus = ps_done THEN
        IF preset THEN
          mmp$preset_real_memory (sva, pmc$initialize_to_zero);
        IFEND;
        mmv$pt_p^ [mmv$pft_p^ [pfti].pti].v := TRUE;
      ELSE
        mmp$delete_page_from_monitor (p, i - 1, status);
        IF (pstatus = ps_pt_full) THEN
          mtp$set_status_abnormal ('MM', mme$page_table_full, status);
        ELSEIF (pstatus = ps_no_memory) THEN
          mtp$set_status_abnormal ('MM', mme$no_free_pages, status);
        ELSE
          mtp$error_stop ('MM - unexpected reject on assign_page_to_monitor');
        IFEND;
        RETURN; {----->
      IFEND;
      sva.offset := sva.offset + osv$page_size;
    FOREND;

  PROCEND mmp$assign_page_to_monitor;
?? TITLE := 'MMP$DELETE_PAGE_FROM_MONITOR', EJECT ??

{-------------------------------------------------------------------------

*copyc mmh$delete_page_from_monitor

{-------------------------------------------------------------------------


  PROCEDURE [XDCL] mmp$delete_page_from_monitor
    (    p: ^cell;
         page_count: integer;
     VAR status: syt$monitor_status);

    VAR
      pva: ^cell,
      rma: integer,
      i: integer,
      pfti: mmt$page_frame_index;

    status.normal := TRUE;
    pva := p;

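{ For each page: translate the pva to its byte real memory address (a
{ negative rma means no translation exists), convert the rma to a page
{ frame index, delete the page table entry, and return the frame to the
{ free queue.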
    FOR i := 1 TO page_count DO
      #real_memory_address (pva, rma);
      IF rma < 0 THEN
        mtp$error_stop ('MM - bad pva on delete_page_from_monitor');
      IFEND;
      pfti := rma DIV osv$page_size;
      mmp$delete_pt_entry (pfti, TRUE);
      mmp$relink_page_frame (pfti, mmc$pq_free);
      pva := #ADDRESS (1, #SEGMENT (p), #OFFSET (pva) + osv$page_size);
    FOREND;

    #PURGE_BUFFER (osc$pva_purge_all_page_seg_map, pva);

  PROCEND mmp$delete_page_from_monitor;
?? TITLE := 'MMP$XCHECK_QUEUES', EJECT ??

  VAR
    mmv$check_queues: [XDCL, #GATE] integer := 0,
    mmv$last_xchecked_queue: [XDCL, #GATE] integer := 1;

  PROCEDURE [XDCL] mmp$xcheck_queues;

    PROCEDURE check_queue
      (    qcb: mmt$page_queue_list_entry;
           qid: integer);

      VAR
        i: integer,
        pfti,
        prev_pfti: integer;

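{ Walk the queue from its bkw head, following bkw links for exactly
{ qcb.count elements.  Three invariants are checked: each element's fwd
{ link names the previously visited element (0 for the head), every
{ element carries the expected queue id, and the chain terminates
{ (bkw = 0) after exactly qcb.count elements.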
      pfti := qcb.link.bkw;
      prev_pfti := 0;
      FOR i := 1 TO qcb.count DO
        IF pfti = 0 THEN
          mtp$error_stop ('MM - check queue, qcb count');
        IFEND;
        IF mmv$pft_p^ [pfti].link.fwd <> prev_pfti THEN
          mtp$error_stop ('MM - check queue, bad fwd');
        IFEND;
        IF mmv$pft_p^ [pfti].queue_id <> qid THEN
          mtp$error_stop ('MM - check queue, bad qid');
        IFEND;
        prev_pfti := pfti;
        pfti := mmv$pft_p^ [pfti].link.bkw;
      FOREND;
      IF pfti <> 0 THEN
        mtp$error_stop ('MM - check queue, bad count2');
      IFEND;
    PROCEND check_queue;

    VAR
      cst_p: ^ost$cpu_state_table,
      pit: integer,
      last_check_time: [STATIC] integer := 0,
      i: integer;

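{ mmv$check_queues acts as a debug dial: 0 disables checking; 1 checks
{ the global queues at most once per 1000000 free-running-clock units;
{ any larger value checks them on every call.  The current job's queues
{ are checked on every enabled call.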
    IF (mmv$check_queues > 0) AND mmv$tables_initialized THEN
      pit := #READ_REGISTER (osc$pr_process_interval_timer);
      IF (mmv$check_queues > 1) OR (#FREE_RUNNING_CLOCK (0) - last_check_time > 1000000) THEN
        FOR i := 0 TO mmv$last_xchecked_queue DO
          check_queue (mmv$gpql [i].pqle, i);
        FOREND;
      IFEND;
{     mtp$cst_p (cst_p);
      cst_p := mtf$cst_p ();
      IF cst_p^.xcb_p <> NIL THEN
        FOR i := mmc$pq_job_fixed TO mmc$pq_job_working_set DO
          check_queue (cst_p^.ijle_p^.job_page_queue_list [i], i);
        FOREND;
      IFEND;
      #WRITE_REGISTER (osc$pr_process_interval_timer, pit);
      last_check_time := #FREE_RUNNING_CLOCK (0);
    IFEND;
  PROCEND mmp$xcheck_queues;

?? TITLE := 'MMP$PROCESS_VOLUME_UNAVAILABLE', EJECT ??

  PROCEDURE [XDCL] mmp$process_volume_unavailable
    (    xcb_p: ^ost$execution_control_block;
         reset_p_register: boolean);

    VAR
      fde_p: gft$file_desc_entry_p,
      ijle_p: ^jmt$initiated_job_list_entry,
      sfd_p: dft$server_descriptor_p,
      stxe_p: ^mmt$segment_descriptor_extended,
      segnum: ost$segment,
      status: syt$monitor_status;

    segnum := #SEGMENT (xcb_p^.page_wait_info.pva);
    stxe_p := mmp$get_sdtx_entry_p (xcb_p, segnum);

    ijle_p := jmf$ijle_p (tmv$ptl_p^ [xcb_p^.global_task_id.index].ijl_ordinal);
    gfp$mtr_get_fde_p (stxe_p^.sfid, ijle_p, fde_p);
    IF fde_p^.media = gfc$fm_served_file THEN
      dfp$get_served_file_desc_p (fde_p, sfd_p);
      IF (sfd_p^.header.file_state = dfc$awaiting_recovery) AND
            (xcb_p^.xp.trap_enable = osc$traps_enabled_delay) THEN
        xcb_p^.xp.trap_enable := osc$traps_enabled;
        tmp$set_monitor_flag (xcb_p^.global_task_id, mmc$mf_volume_unavailable, status);
        RETURN; {----->
      IFEND;
    IFEND;

    IF (xcb_p^.xp.trap_enable <> osc$traps_enabled) OR

{ Could have used p_register.ring <= 3, but that has problems too

    (segnum < mmc$first_loader_predefined_seg) OR (mmc$sa_stack IN stxe_p^.software_attribute_set) THEN
      IF (tmv$ptl_p^ [xcb_p^.global_task_id.index].status < tmc$ts_first_external_queue) AND
            (tmv$ptl_p^ [xcb_p^.global_task_id.index].new_task_status < tmc$ts_first_external_queue) THEN
        tmp$queue_task (xcb_p^.global_task_id, tmc$ts_volume_unavailable, mmv$volume_wait_queue);
        IF (xcb_p^.system_table_lock_count > 255) OR (xcb_p^.critical_task) OR
              (tmv$ptl_p^ [xcb_p^.global_task_id.index].ijl_ordinal = jmv$system_ijl_ordinal) THEN
          mtp$step_unstep_system (syc$ic_disk_error,
                'ERR=VEOS9301- A critical system task has encountered an unavailable volume');
          iop$enable_all_disk_units (status);
        IFEND;
      IFEND;
    ELSE
      tmp$set_monitor_flag (xcb_p^.global_task_id, mmc$mf_volume_unavailable, status);
    IFEND;

    IF reset_p_register THEN
      tmp$reissue_monitor_request;
    IFEND;

  PROCEND mmp$process_volume_unavailable;
?? TITLE := 'MMP$VOLUME_AVAILABLE', EJECT ??

  PROCEDURE [XDCL] mmp$volume_available;

    VAR
      taskid: ost$global_task_id;

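{ Drain the volume wait queue, releasing every task that was queued
{ while a volume was unavailable.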
    WHILE (mmv$volume_wait_queue.head <> 0) DO
      tmp$dequeue_task (mmv$volume_wait_queue, taskid);
    WHILEND;

  PROCEND mmp$volume_available;

?? TITLE := 'MMP$INCLUDE_P_REG_IN_DUMP', EJECT ??

  PROCEDURE [XDCL] mmp$include_p_reg_in_dump;

    PROCEDURE set_page
      (    ste_p: ^mmt$segment_descriptor;
           offset: integer);

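{ If the segment has an active asid and the offset is a legal byte
{ offset, hash the sva; when the page is resident, mark its page frame
{ for inclusion in the dump.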
      IF (ste_p^.ste.asid <> 0) AND (offset >= 0) AND (offset <= 7fffffff(16)) THEN
        sva.asid := ste_p^.ste.asid;
        sva.offset := offset;
        #HASH_SVA (sva, pti, count, found);
        IF found THEN
          pfti := (mmv$pt_p^ [pti].rma * 512) DIV osv$page_size;
          mmv$pages_to_dump_p^ [pfti] := TRUE;
        IFEND;
      IFEND;

    PROCEND set_page;

    VAR
      found: boolean,
      sva: ost$system_virtual_address,
      pti: integer,
      count: integer,
      cst_index: integer,
      rma: integer,
      ste_p: ^mmt$segment_descriptor,
      pfti: mmt$page_frame_index,
      xcb_p: ^ost$execution_control_block;

    IF mmv$pages_to_dump_p <> NIL THEN
      FOR cst_index := LOWERBOUND (mtv$cst0) TO UPPERBOUND (mtv$cst0) DO
        IF mtv$cst0 [cst_index].xcb_p <> NIL THEN
          xcb_p := mtv$cst0 [cst_index].xcb_p;
          #real_memory_address (xcb_p, rma);
          IF rma >= 0 THEN
            ste_p := mmp$get_sdt_entry_p (xcb_p, xcb_p^.xp.p_register.pva.seg);
            set_page (ste_p, xcb_p^.xp.p_register.pva.offset);
            set_page (ste_p, xcb_p^.xp.p_register.pva.offset + osv$page_size);
            set_page (ste_p, xcb_p^.xp.p_register.pva.offset - osv$page_size);
          IFEND;
        IFEND;
      FOREND;
    IFEND;

  PROCEND mmp$include_p_reg_in_dump;
?? OLDTITLE ??
MODEND mmm$page_fault_processor;
