*copyc osd$default_pragmats
?? TITLE := 'RHFAM/VE : Monitor Processing' ??
?? NEWTITLE := '  Common Decks' ??
MODULE rfm$request_processing_mtr;


*copyc rft$rb_queue_data_fragments
?? EJECT ??
*copyc i$real_memory_address
*copyc mmp$build_lock_rma_list
*copyc mmp$xtask_pva_to_sva
*copyc mmp$unlock_rma_list
*copyc mtp$error_stop
*copyc ost$global_task_id
*copyc osv$page_size
*copyc rft$r1_interface_defs
*copyc rft$status_response_pending
*copyc tmp$check_taskid
*copyc tmp$set_task_ready
*copyc tmp$set_system_flag
*copyc syt$monitor_status
?? TITLE := '  Mainframe Global Variables' ??
?? EJECT ??

{
{    The following global definition is used to globally identify the RHFAM/VE
{    system task for use by monitor.
{

  {  Task identifier of the RHFAM/VE system task.  [0,0] means the task is
  {  not running (tested by the response processor below); presumably set
  {  by the system task at initialization - confirm against the task code.

  VAR
      rfv$system_task_id: [XDCL, #GATE] ost$global_task_id := [0,0];

  {  Procedure reference through which PP responses are delivered to
  {  rfp$io_complete_processor (presumably installed with the I/O
  {  subsystem; verify against the driver tables).

  VAR
      rfv$response_processor: [XDCL, #GATE] iot$response_processor := ^rfp$io_complete_processor;

  {  Records the first PP interface error reported by an unsolicited
  {  response; the system task aborts with an invalid status when a PP
  {  reports an interface error.

  VAR
      rfv$pp_interface_error: [XDCL, #GATE] rft$pp_interface_error := [0,0];

  {  Per-NAD status-response table; the in_host flag for a NAD is set when
  {  a status-NAD response is received.  NIL until the table is created
  {  elsewhere.

  VAR
      rfv$status_response_pending: [XDCL, #GATE] rft$status_response_pending := NIL;

  {  Sequence number stamped on each terminating PP response as it is
  {  posted into the request/response buffer.

  VAR
      rfv$response_seq_number: INTEGER := 0;
?? TITLE := '  RFP$IO_COMPLETE_PROCESSOR' ??
?? EJECT ??
  PROCEDURE  [XDCL] rfp$io_complete_processor(pp_response: ^iot$pp_response;
                                  detailed_status: ^iot$detailed_status;
                                  logical_pp: 1..ioc$pp_count;
                              VAR status: syt$monitor_status);

*copyc rfh$io_complete_processor

    {  Process a PP response on behalf of RHFAM/VE.  Dispatches on the
    {  primary response code:
    {
    {    unsolicited      - capture the first PP interface error and READY
    {                       the system task so that it may abort.
    {    normal/abnormal  - copy the terminating response (and any detailed
    {                       status) into the caller's wired request/response
    {                       buffer, unlock the data pages, and restart the
    {                       requesting task.
    {    intermediate     - unlock the pages already transferred and restart
    {                       the requesting task so that it may queue more
    {                       data addresses.
    {
    {  pp_response: (input) pointer to the PP response to be processed.
    {
    {  detailed_status: (input) pointer to the detailed status list that
    {    accompanies the response, if any.
    {
    {  logical_pp: (input) logical number of the PP that generated the
    {    response.
    {
    {  status: (output) always returned normal; unrecoverable conditions
    {    stop the system via mtp$error_stop.

    VAR
        command_buffer: ^ARRAY [rft$command_entry] OF rft$command,
        current_in_ptr,
        current_out_ptr: rft$command_entry,
        request: ^rft$peripheral_request,
        request_response_buffer: ^rft$request_response_buffer,
        detailed_status_entry: ^rft$detailed_status,
        detail_status: ^iot$detailed_status,
        io_type: iot$io_function,
        ignore_status: syt$monitor_status;


    detail_status := detailed_status;
    status.normal := TRUE;

    CASE  pp_response^.response_code.primary_response  OF

    = ioc$unsolicited_response =

      {  An unsolicited response should only be returned if there is an error encountered by the
      {  PP, when the PP is validating its PP interface table entry and unit interface table entry.
      {  The unsolicited response code is placed in a global variable and the system task is
      {  READYed.  The system task will abort with an invalid status.  Since the system task aborts
      {  if any PP reports an interface error, it is only necessary to capture the first error.

      IF  (rfv$system_task_id.index = 0)  AND  (rfv$system_task_id.seqno = 0)  THEN

        {  A PP response should not be given if the system task is not running  }

        mtp$error_stop('RF - INVALID UNSOLICITED RESPONSE');
      ELSE

        {  FIX(review): the original test was "<> 0", which could never
        {  capture the first error because interface_error_code is
        {  initialized to zero.  Per the intent stated above, record the
        {  error only when none has been captured yet.

        IF  rfv$pp_interface_error.interface_error_code = 0  THEN
          rfv$pp_interface_error.pp_number := logical_pp;
          rfv$pp_interface_error.interface_error_code := pp_response^.interface_error_code;
          tmp$check_taskid(rfv$system_task_id, tmc$opt_return, ignore_status);
          IF ignore_status.normal THEN
            tmp$set_task_ready(rfv$system_task_id, 0 {readying_task_priority},
              tmc$rc_ready_conditional_wi);
          IFEND;
        IFEND;
      IFEND;

    = ioc$normal_response, ioc$abnormal_response =

      {  For either of these response types the response is simply copied into
      {  the specified wired area and the queuing task is restarted to
      {  perform the response processing.

      request := pp_response^.request^.device_request_p;
      request_response_buffer := request^.request_buffer_ptr;
      request_response_buffer^.response := pp_response^;
      IF  request_response_buffer^.response.response_code.secondary_response = 1  THEN {detailed status}
        RESET  detail_status;
        NEXT  detailed_status_entry IN detail_status;
        IF  detailed_status_entry <> NIL  THEN
          request_response_buffer^.detailed_status := detailed_status_entry^;
        IFEND;
      IFEND;
      command_buffer := #LOC(request_response_buffer^.command_buffer);
      IF  (command_buffer^[rfc$cbi_unit_request_1].lc_function_code = rfc$lc_send_data)  OR
          (command_buffer^[rfc$cbi_unit_request_1].lc_function_code = rfc$lc_receive_data)  THEN
        IF  command_buffer^[rfc$cbi_unit_request_1].lc_function_code = rfc$lc_send_data  THEN
          io_type := ioc$explicit_write;
        ELSE
          io_type := ioc$explicit_read;
        IFEND;

        {  The buffer pointer offsets count 8-byte entries relative to the
        {  first I/O entry (presumed from the DIV 8 scaling - confirm
        {  against the command buffer layout).

        current_in_ptr := (command_buffer^[rfc$cbi_in_pointer].bp_offset DIV 8) + rfc$cbi_first_io_entry;

        {   This pointer is updated on terminating responses to prevent the request queueing routine
        {   from adding additional entries after the request has completed.

        request_response_buffer^.previous_in_ptr := current_in_ptr;
        current_out_ptr := (command_buffer^[rfc$cbi_out_pointer].bp_offset DIV 8) + rfc$cbi_first_io_entry;
        unlock_pages(command_buffer, request_response_buffer^.previous_out_ptr, current_out_ptr, io_type);

        {   This pointer is only updated by this routine.  This is used to detect the amount of
        {   data that has actually been transferred.

        request_response_buffer^.previous_out_ptr := current_out_ptr;

        {  NOTE - on a terminating response the pages that have been locked but not transferred to/from
        {         must be unlocked.

        unlock_pages(command_buffer, current_out_ptr, current_in_ptr, ioc$no_io);
      ELSEIF  (command_buffer^[rfc$cbi_unit_request_1].lc_function_code = rfc$lc_status_nad)  THEN
        rfv$status_response_pending^[request_response_buffer^.request_id.ring_3_id.nad].in_host := TRUE;
      IFEND;

      {  Stamp the response with a new sequence number.  #SPOIL presumably
      {  keeps the compiler from reordering these shared-cell stores - the
      {  sequence number must be visible before response_posted is set
      {  (confirm #SPOIL semantics against the CYBIL reference).

      rfv$response_seq_number := rfv$response_seq_number + 1;
      request_response_buffer^.response_seq_number := rfv$response_seq_number;
      #SPOIL(request_response_buffer^.response_seq_number, request_response_buffer^.response_posted);
      request_response_buffer^.response_posted := TRUE;
      IF  request_response_buffer^.asynchronous_request  THEN
        tmp$set_system_flag(request_response_buffer^.task_id, rfc$pp_response_available, ignore_status);
      ELSE
        tmp$check_taskid(request_response_buffer^.task_id, tmc$opt_return, ignore_status);
        IF ignore_status.normal THEN
          tmp$set_task_ready(request_response_buffer^.task_id, 0 {readying_task_priority},
             tmc$rc_ready_conditional_wi);
        IFEND;
      IFEND;

    = ioc$intermediate_response =

      {  This response is only given when a send data or receive data request
      {  has room for more data addresses to be placed in the request buffer.
      {  No data is passed to the task.  The task is simply restarted and it
      {  is up to that task to determine what actions, if any, should be taken.

      request := pp_response^.request^.device_request_p;
      request_response_buffer := request^.request_buffer_ptr;
      command_buffer := #LOC(request_response_buffer^.command_buffer);
      IF  (command_buffer^[rfc$cbi_unit_request_1].lc_function_code = rfc$lc_send_data)  OR
          (command_buffer^[rfc$cbi_unit_request_1].lc_function_code = rfc$lc_receive_data)  THEN
        IF  command_buffer^[rfc$cbi_unit_request_1].lc_function_code = rfc$lc_send_data  THEN
          io_type := ioc$explicit_write;
        ELSE
          io_type := ioc$explicit_read;
        IFEND;
        current_out_ptr := (command_buffer^[rfc$cbi_out_pointer].bp_offset DIV 8) + rfc$cbi_first_io_entry;
        unlock_pages(command_buffer, request_response_buffer^.previous_out_ptr, current_out_ptr, io_type);

        {   This pointer is only updated by this routine.  This is used to detect the amount of
        {   data that has actually been transferred.

        request_response_buffer^.previous_out_ptr := current_out_ptr;
      IFEND;
      IF  request_response_buffer^.asynchronous_request  THEN
        tmp$set_system_flag(request_response_buffer^.task_id, rfc$pp_response_available, ignore_status);
      ELSE
        tmp$check_taskid(request_response_buffer^.task_id, tmc$opt_return, ignore_status);
        IF ignore_status.normal THEN
          tmp$set_task_ready(request_response_buffer^.task_id, 0 {readying_task_priority},
             tmc$rc_ready_conditional_wi);
        IFEND;
      IFEND;

    ELSE

      {  If we ever get here, some super-natural event probably occurred.  For
      {  now we will stop the system.  We may do something more fault tolerant
      {  in the future.

      {  FIX(review): corrected the misspelled stop message ('INAVLID').

      mtp$error_stop('RF - INVALID PP RESPONSE');

    CASEND;

  PROCEND rfp$io_complete_processor;
?? NEWTITLE := '    UNLOCK_PAGES' ??
?? EJECT ??
  PROCEDURE unlock_pages(command_buffer: ^ARRAY [rft$command_entry] of rft$command;
                         first_entry_index: rft$command_entry;
                         last_entry_index: rft$command_entry;
                         io_type: iot$io_function);

{    The purpose of this procedure is to unlock pages after the I/O transfer has completed.
{    The command buffer is treated as a circular buffer: indices wrap from
{    rfc$cbi_limit_pointer back to rfc$cbi_first_io_entry.  Entries flagged
{    "wired" were never locked by the queueing routine and are skipped.
{
{    command_buffer: (input) This parameter specifies a pointer to the command buffer which
{      contains the RMA entries.
{
{    first_entry_index: (input) This parameter specifies the index of the first entry to be
{      unlocked.  It must index a block header entry (the sf_length field).
{
{    last_entry_index: (input) This parameter specifies the index (+ 1) of the last
{      entry to be unlocked.
{
{    io_type: (input) This parameter specifies the type of I/O that was actually performed.

    VAR
        io_error: iot$io_error,
        io_id: mmt$io_identifier,
        actual_rma_list: ARRAY [rfc$cbi_first_io_entry..rfc$cbi_last_io_entry] OF mmt$rma_list_entry,
        rma_list: ^mmt$rma_list,
        rma_list_length: 0..mmc$max_rma_list_length,
        ignore_status: syt$monitor_status,
        number_of_entries: 0..rfc$command_buffer_size,
        fragment,
        current_entry_index: rft$command_entry;


    io_id.specified := FALSE;
    io_id.io_function := io_type;
    current_entry_index := first_entry_index;

    {  The stack-allocated entry array is viewed as an mmt$rma_list for the
    {  unlock call.

    rma_list := #LOC(actual_rma_list);

    WHILE  current_entry_index <> last_entry_index  DO

      {  Each block starts with a header entry whose sf_length presumably
      {  gives the block length in bytes; entries appear to be 8 bytes each,
      {  so the number of RMA entries following the header is length/8 - 1
      {  (derived from the DIV 8 scaling - confirm against the buffer layout).

      number_of_entries := (command_buffer^[current_entry_index].sf_length DIV 8) - 1;
      current_entry_index := current_entry_index + 1;
      IF  current_entry_index >= rfc$cbi_limit_pointer  THEN
        current_entry_index := rfc$cbi_first_io_entry;
      IFEND;

      {  Gather the RMAs of this block's non-wired entries into one list.

      rma_list_length := 0;
      FOR  fragment := 1 TO number_of_entries  DO
        IF  NOT command_buffer^[current_entry_index].wired  THEN
          rma_list_length := rma_list_length + 1;
          rma_list^[rma_list_length].length := command_buffer^[current_entry_index].re_length;
          rma_list^[rma_list_length].rma := command_buffer^[current_entry_index].re_address;
        IFEND;
        current_entry_index := current_entry_index + 1;
        IF  current_entry_index >= rfc$cbi_limit_pointer  THEN
          current_entry_index := rfc$cbi_first_io_entry;
        IFEND;
      FOREND;

      {  Unlock the gathered pages; status and I/O errors are deliberately
      {  ignored (best effort - nothing useful can be done here on failure).

      IF  rma_list_length <> 0  THEN
        io_error := ioc$no_error;
        mmp$unlock_rma_list(io_type, rma_list, rma_list_length, io_id, {MF_JOB_FILE} FALSE,
               io_error, ignore_status);
      IFEND;
    WHILEND;


  PROCEND unlock_pages;
?? OLDTITLE ??
?? TITLE := '  RFP$QUEUE_DATA_FRAGMENTS' ??
?? EJECT ??
  PROCEDURE [XDCL] rfp$queue_data_fragments(VAR request_block: rft$rb_queue_data_fragments);

*copyc rfh$queue_data_fragments

    {  Lock the caller's data pages into real memory and queue the resulting
    {  RMA fragments into the circular PP command buffer.  The IN pointer is
    {  advanced only after all blocks are queued, and only if the PP has not
    {  already completed the request; if the PP completed first, every page
    {  locked here is unlocked again and a normal status is returned so the
    {  caller can decide whether to requeue.
    {
    {  request_block: (input/output) queue-data-fragments request block;
    {    supplies the request buffer pointer, block count, I/O type, and
    {    clear_complete_flag, and receives the completion status.

    VAR
        pva: ^cell,
        sva: ost$system_virtual_address,
        io_identifier: mmt$io_identifier,
        buffer_descriptor: mmt$buffer_descriptor,
        actual_rma_list: ARRAY [rfc$cbi_first_io_entry..rfc$cbi_last_io_entry] OF mmt$rma_list_entry,
        rma_list,
        current_rma_list: ^mmt$rma_list,
        rma_entry,
        rma_list_total_count,
        rma_list_count: 0..mmc$max_rma_list_length,
        command_buff: ^ARRAY [rft$command_entry] OF rft$command,
        entry_index,
        fragment_count: 0 .. rfc$command_buffer_size,
        save_old_in_pointer,
        save_last_block_pointer,
        buffer_index: rft$command_entry,
        request_buffer: ^rft$request_response_buffer,
        command_entry: rft$command,
        ignore_status,
        monitor_status: syt$monitor_status,
        data_length: rft$transfer_length,
        first_page_locked,
        pages_locked_in_block: BOOLEAN,
        previous_segment: ost$segment,
        page_size: ost$page_size;


    monitor_status.normal := TRUE;
    io_identifier.specified := FALSE;
    io_identifier.io_function := ioc$no_io;
    page_size := osv$page_size;
    request_buffer := request_block.request_buffer;
    rma_list_total_count := 0;
    rma_list := #LOC(actual_rma_list);
    command_buff := #LOC(request_buffer^.command_buffer);
    buffer_descriptor.buffer_descriptor_type := mmc$bd_explicit_io;
    first_page_locked := TRUE;

  /process_request/
    BEGIN

      {  Remember where queueing started (for unlocking on failure) and
      {  start queueing at the previous IN pointer.

      save_old_in_pointer := request_buffer^.previous_in_ptr;
      save_last_block_pointer := request_buffer^.previous_in_ptr;
      buffer_index := request_buffer^.previous_in_ptr;

      {  #SPOIL presumably forces a re-read of the command buffer, which the
      {  PP modifies asynchronously (confirm against the CYBIL reference).

      #SPOIL(command_buff^);
      IF  (NOT request_block.clear_complete_flag) AND
          (command_buff^[rfc$cbi_unit_request_1].lc_flags.pp_process_complete)  THEN

        {  If we get here, the PP has completed the request and the previous in pointer is no
        {  longer valid.  We will return a normal status and let the user determine appropriate
        {  action based on the PP response.

        EXIT /process_request/;
      IFEND;
      FOR  entry_index := 1 TO request_block.number_of_blocks  DO

        {  Each block starts with a header entry; sf_length presumably gives
        {  the block length in bytes at 8 bytes per entry, so the header is
        {  followed by length/8 - 1 fragment entries (mirrors unlock_pages).

        fragment_count := (command_buff^[buffer_index].sf_length DIV 8) - 1;
        buffer_index := buffer_index + 1;
        IF  buffer_index >= rfc$cbi_limit_pointer  THEN
          buffer_index := rfc$cbi_first_io_entry;
        IFEND;
        pages_locked_in_block := FALSE;
        WHILE  fragment_count > 0  DO
          IF  NOT command_buff^[buffer_index].wired  THEN
            IF  pages_locked_in_block  THEN

              {  The RHFAM/VE ring 1 and monitor mode code were designed to support multiple
              {  user fragments aligned in any fashion.  Unfortunately this design is not
              {  currently supported by the memory management lock and unlock rma lists.
              {  Some work must be done in RHFAM/VE to make sure that each RMA list
              {  returned by the lock program is given back to the unlock program verbatim.

              mtp$error_stop('RF - MULTI-FRAGMENT FEATURE DISABLED');
            ELSE
              pages_locked_in_block := TRUE;
            IFEND;

            {  The fragment entry holds an index (pe_pva_index) of another
            {  command entry that carries the user PVA for this fragment.

            pva := command_buff^[command_buff^[buffer_index].pe_pva_index].pv_pva;

            {  Translate PVA to SVA only when this fragment does not
            {  continue contiguously from the previous one (sva.offset is
            {  advanced past each locked fragment below).

            IF  (first_page_locked)  OR
                (previous_segment <> #SEGMENT(pva))  OR
                (sva.offset <> #OFFSET(pva))         THEN
              first_page_locked := FALSE;
              previous_segment := #SEGMENT(pva);
              mmp$xtask_pva_to_sva(pva, sva, monitor_status);
              IF  NOT monitor_status.normal  THEN
                EXIT /process_request/;
              IFEND;
            IFEND;

            buffer_descriptor.sva := sva;
            data_length := command_buff^[buffer_index].pe_length;

            {  Number of pages spanned by [offset, offset + length); the
            {  rma_list_count form is algebraically the same page count.

            buffer_descriptor.page_count := (((sva.offset MOD page_size) + data_length - 1) DIV page_size)
              + 1;
            rma_list_count := ((sva.offset + data_length + (page_size * 2) - 1) DIV page_size) -
              ((sva.offset + page_size) DIV page_size);

            {  Build this fragment's RMA entries directly after those
            {  already accumulated, so the whole list can be unlocked
            {  verbatim on failure.

            current_rma_list := #LOC(rma_list^[rma_list_total_count+1]);
            mmp$build_lock_rma_list(buffer_descriptor, data_length, request_block.io_type,
              current_rma_list, rma_list_count, monitor_status);
            IF  NOT monitor_status.normal  THEN
              EXIT /process_request/;
            IFEND;
            sva.offset := sva.offset + data_length;
            rma_list_total_count := rma_list_total_count + rma_list_count;

            {  Copy the locked RMAs into the command buffer entries,
            {  wrapping at the limit pointer; wired := FALSE marks entries
            {  that unlock_pages must later unlock.

            rma_entry := 1;
            WHILE  rma_entry <= rma_list_count  DO
              command_entry.wired := FALSE;
              command_entry.re_length := current_rma_list^[rma_entry].length;
              command_entry.re_address := current_rma_list^[rma_entry].rma;
              command_buff^[buffer_index] := command_entry;
              rma_entry := rma_entry + 1;
              fragment_count := fragment_count - 1;
              buffer_index := buffer_index + 1;
              IF  buffer_index >= rfc$cbi_limit_pointer  THEN
                buffer_index := rfc$cbi_first_io_entry;
              IFEND;
            WHILEND;
          ELSE

            {  Already-wired entries need no locking; just step past them.

            fragment_count := fragment_count - 1;
            buffer_index := buffer_index + 1;
            IF  buffer_index >= rfc$cbi_limit_pointer  THEN
              buffer_index := rfc$cbi_first_io_entry;
            IFEND;
          IFEND;
        WHILEND;
        save_last_block_pointer := buffer_index;
      FOREND;

      {   This pointer is updated in this routine after the data is queued.  The response processor
      {   may reset this pointer if a terminating PP response was received.  Therefore this pointer
      {   must be set before the PP processing complete flag is checked.

      request_buffer^.previous_in_ptr := buffer_index;
      #SPOIL(request_buffer^.previous_in_ptr);
      IF  request_block.clear_complete_flag  THEN
        request_buffer^.response_posted := FALSE;

        {   This pointer is only updated in this routine (when the page locking code is in use).

        command_buff^[rfc$cbi_in_pointer].bp_offset := (buffer_index - rfc$cbi_first_io_entry) * 8;
        #SPOIL(command_buff^);
        command_buff^[rfc$cbi_unit_request_1].lc_flags.pp_process_complete := FALSE;
      ELSE
        #SPOIL(command_buff^);
        IF  command_buff^[rfc$cbi_unit_request_1].lc_flags.pp_process_complete  THEN

          {   If the PP processing had completed before the additional buffers could be queued,
          {   the pages just locked must be unlocked.  A normal status is returned and
          {   the calling program must determine if the pages are to be requeued.

          unlock_pages(command_buff, save_old_in_pointer, save_last_block_pointer, ioc$no_io);
        ELSE

          {   This pointer is only updated in this routine (when the page locking code is in use).

          command_buff^[rfc$cbi_in_pointer].bp_offset := (buffer_index - rfc$cbi_first_io_entry) * 8;
          #SPOIL(command_buff^);

          {  The following test is used as a fail safe for multi-processor environments.  There is
          {  a possibility (none with today's implementation) that a PP response could be received and
          {  the IN pointer be reset before this routine updated the IN pointer offset.  Therefore
          {  the following test is made to make sure that all pages are unlocked if the above
          {  scenario would ever occur.  A normal status is returned and the calling program must
          {  determine if the pages are to be requeued.

          IF  ((request_buffer^.previous_in_ptr - rfc$cbi_first_io_entry) * 8) <>
               (command_buff^[rfc$cbi_in_pointer].bp_offset)  THEN
            unlock_pages(command_buff, save_old_in_pointer, save_last_block_pointer, ioc$no_io);
          IFEND;
        IFEND;
      IFEND;
    END /process_request/;

    {  On failure, release any pages locked before the error occurred.

    IF  monitor_status.normal  THEN
      request_block.status.normal := TRUE;
    ELSE
      IF  (rma_list_total_count <> 0)  THEN
        unlock_pages(command_buff, save_old_in_pointer, save_last_block_pointer, ioc$no_io);
      IFEND;
      request_block.status := monitor_status;
    IFEND;

  PROCEND rfp$queue_data_fragments;

MODEND rfm$request_processing_mtr;
